-rw-r--r--  compiler/compiled_method.h | 21
-rw-r--r--  compiler/linker/arm64/relative_patcher_arm64.cc | 1
-rw-r--r--  compiler/oat_writer.cc | 42
-rw-r--r--  compiler/oat_writer.h | 6
-rw-r--r--  compiler/optimizing/code_generator.cc | 49
-rw-r--r--  compiler/optimizing/code_generator.h | 17
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 149
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 8
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 165
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 14
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 140
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h | 8
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 149
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 8
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 132
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 8
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 156
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 9
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 138
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 9
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_arm.cc | 15
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_mips.cc | 15
-rw-r--r--  compiler/optimizing/graph_visualizer.cc | 5
-rw-r--r--  compiler/optimizing/induction_var_range.cc | 8
-rw-r--r--  compiler/optimizing/induction_var_range.h | 4
-rw-r--r--  compiler/optimizing/inliner.cc | 15
-rw-r--r--  compiler/optimizing/instruction_builder.cc | 64
-rw-r--r--  compiler/optimizing/instruction_builder.h | 11
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc | 5
-rw-r--r--  compiler/optimizing/load_store_elimination.cc | 4
-rw-r--r--  compiler/optimizing/loop_optimization.cc | 166
-rw-r--r--  compiler/optimizing/loop_optimization.h | 8
-rw-r--r--  compiler/optimizing/nodes.cc | 24
-rw-r--r--  compiler/optimizing/nodes.h | 124
-rw-r--r--  compiler/optimizing/pc_relative_fixups_mips.cc | 3
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.cc | 2
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc | 33
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.h | 1
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc | 4
-rw-r--r--  compiler/optimizing/sharpening.cc | 66
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 194
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 207
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 176
-rw-r--r--  runtime/arch/mips64/instruction_set_features_mips64.cc | 2
-rw-r--r--  runtime/arch/mips64/instruction_set_features_mips64_test.cc | 2
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 167
-rw-r--r--  runtime/arch/quick_alloc_entrypoints.S | 34
-rw-r--r--  runtime/arch/stub_test.cc | 12
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 271
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 203
-rw-r--r--  runtime/class_linker.cc | 112
-rw-r--r--  runtime/class_linker.h | 13
-rw-r--r--  runtime/common_throws.cc | 8
-rw-r--r--  runtime/dex_instruction.cc | 46
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 70
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc | 39
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h | 29
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.cc | 136
-rw-r--r--  runtime/entrypoints/quick/quick_default_externs.h | 5
-rw-r--r--  runtime/entrypoints/quick/quick_default_init_entrypoints.h | 1
-rw-r--r--  runtime/entrypoints/quick/quick_dexcache_entrypoints.cc | 72
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h | 8
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 120
-rw-r--r--  runtime/entrypoints_order_test.cc | 12
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 21
-rw-r--r--  runtime/gc/collector/concurrent_copying.h | 3
-rw-r--r--  runtime/gc/collector/garbage_collector.h | 5
-rw-r--r--  runtime/gc/collector/mark_compact.cc | 10
-rw-r--r--  runtime/gc/collector/mark_compact.h | 3
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 9
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 3
-rw-r--r--  runtime/gc/collector/semi_space.cc | 7
-rw-r--r--  runtime/gc/collector/semi_space.h | 3
-rw-r--r--  runtime/gc/reference_processor.cc | 4
-rw-r--r--  runtime/gc/reference_queue.cc | 10
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc | 5
-rw-r--r--  runtime/interpreter/mterp/mterp.cc | 7
-rw-r--r--  runtime/java_vm_ext.cc | 5
-rw-r--r--  runtime/mirror/object_reference-inl.h | 9
-rw-r--r--  runtime/mirror/object_reference.h | 3
-rw-r--r--  runtime/monitor.cc | 6
-rw-r--r--  runtime/native/dalvik_system_VMStack.cc | 12
-rw-r--r--  runtime/oat.h | 2
-rw-r--r--  runtime/oat_file.cc | 6
-rw-r--r--  runtime/openjdkjvmti/Android.bp | 1
-rw-r--r--  runtime/openjdkjvmti/OpenjdkJvmTi.cc | 81
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.cc | 467
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.h | 226
-rw-r--r--  runtime/openjdkjvmti/ti_stack.cc | 603
-rw-r--r--  runtime/openjdkjvmti/ti_stack.h | 23
-rw-r--r--  runtime/openjdkjvmti/ti_thread.cc | 446
-rw-r--r--  runtime/openjdkjvmti/ti_thread.h | 56
-rw-r--r--  runtime/runtime.cc | 45
-rw-r--r--  runtime/thread.cc | 4
-rw-r--r--  runtime/thread.h | 13
-rw-r--r--  runtime/verifier/method_verifier.cc | 9
-rw-r--r--  test/129-ThreadGetId/expected.txt | 1
-rw-r--r--  test/129-ThreadGetId/src/Main.java | 39
-rw-r--r--  test/494-checker-instanceof-tests/src/Main.java | 6
-rw-r--r--  test/496-checker-inlining-class-loader/src/Main.java | 19
-rw-r--r--  test/529-checker-unresolved/src/Main.java | 4
-rw-r--r--  test/552-checker-sharpening/src/Main.java | 30
-rw-r--r--  test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali | 6
-rw-r--r--  test/559-checker-irreducible-loop/src/Main.java | 3
-rw-r--r--  test/566-polymorphic-inlining/polymorphic_inline.cc | 5
-rw-r--r--  test/621-checker-new-instance/expected.txt | 0
-rw-r--r--  test/621-checker-new-instance/info.txt | 1
-rw-r--r--  test/621-checker-new-instance/src/Main.java | 53
-rw-r--r--  test/627-checker-unroll/expected.txt | 1
-rw-r--r--  test/627-checker-unroll/info.txt | 1
-rw-r--r--  test/627-checker-unroll/src/Main.java | 119
-rw-r--r--  test/633-checker-rtp-getclass/expected.txt | 3
-rw-r--r--  test/633-checker-rtp-getclass/info.txt | 3
-rw-r--r--  test/633-checker-rtp-getclass/src/Main.java | 76
-rw-r--r--  test/903-hello-tagging/tagging.cc | 14
-rw-r--r--  test/903-hello-tagging/tagging.h | 30
-rw-r--r--  test/904-object-allocation/tracking.cc | 15
-rw-r--r--  test/905-object-free/tracking_free.cc | 14
-rw-r--r--  test/906-iterate-heap/iterate_heap.cc | 14
-rw-r--r--  test/906-iterate-heap/iterate_heap.h | 30
-rw-r--r--  test/907-get-loaded-classes/get_loaded_classes.cc | 14
-rw-r--r--  test/907-get-loaded-classes/get_loaded_classes.h | 30
-rw-r--r--  test/908-gc-start-finish/gc_callbacks.cc | 14
-rw-r--r--  test/908-gc-start-finish/gc_callbacks.h | 30
-rw-r--r--  test/909-attach-agent/expected.txt | 8
-rwxr-xr-x  test/909-attach-agent/run | 10
-rw-r--r--  test/910-methods/methods.cc | 14
-rw-r--r--  test/910-methods/methods.h | 30
-rw-r--r--  test/911-get-stack-trace/expected.txt | 978
-rw-r--r--  test/911-get-stack-trace/src/AllTraces.java | 80
-rw-r--r--  test/911-get-stack-trace/src/ControlData.java (renamed from test/918-fields/fields.h) | 23
-rw-r--r--  test/911-get-stack-trace/src/Frames.java | 133
-rw-r--r--  test/911-get-stack-trace/src/Main.java | 154
-rw-r--r--  test/911-get-stack-trace/src/OtherThread.java | 82
-rw-r--r--  test/911-get-stack-trace/src/PrintThread.java | 70
-rw-r--r--  test/911-get-stack-trace/src/Recurse.java | 58
-rw-r--r--  test/911-get-stack-trace/src/SameThread.java (renamed from test/904-object-allocation/tracking.h) | 29
-rw-r--r--  test/911-get-stack-trace/src/ThreadListTraces.java | 71
-rw-r--r--  test/911-get-stack-trace/stack_trace.cc | 163
-rw-r--r--  test/911-get-stack-trace/stack_trace.h | 30
-rw-r--r--  test/912-classes/classes.cc | 14
-rw-r--r--  test/912-classes/classes.h | 30
-rw-r--r--  test/913-heaps/heaps.cc | 14
-rw-r--r--  test/918-fields/fields.cc | 14
-rw-r--r--  test/920-objects/objects.cc | 14
-rw-r--r--  test/920-objects/objects.h | 30
-rw-r--r--  test/921-hello-failure/expected.txt | 8
-rw-r--r--  test/921-hello-failure/src/CommonClassDefinition.java (renamed from test/922-properties/properties.h) | 23
-rw-r--r--  test/921-hello-failure/src/Main.java | 21
-rw-r--r--  test/921-hello-failure/src/MultiRedef.java | 100
-rw-r--r--  test/922-properties/properties.cc | 14
-rw-r--r--  test/923-monitors/monitors.cc | 14
-rwxr-xr-x  test/924-threads/build | 17
-rw-r--r--  test/924-threads/expected.txt | 31
-rw-r--r--  test/924-threads/info.txt | 1
-rwxr-xr-x  test/924-threads/run | 19
-rw-r--r--  test/924-threads/src/Main.java | 232
-rw-r--r--  test/924-threads/threads.cc | 124
-rwxr-xr-x  test/926-multi-obsolescence/build | 17
-rw-r--r--  test/926-multi-obsolescence/expected.txt | 15
-rw-r--r--  test/926-multi-obsolescence/info.txt | 2
-rwxr-xr-x  test/926-multi-obsolescence/run | 19
-rw-r--r--  test/926-multi-obsolescence/src/CommonClassDefinition.java (renamed from test/923-monitors/monitors.h) | 23
-rw-r--r--  test/926-multi-obsolescence/src/Main.java | 128
-rw-r--r--  test/926-multi-obsolescence/src/Transform.java (renamed from test/905-object-free/tracking_free.h) | 28
-rw-r--r--  test/926-multi-obsolescence/src/Transform2.java (renamed from test/913-heaps/heaps.h) | 21
-rwxr-xr-x  test/953-invoke-polymorphic-compiler/build | 25
-rw-r--r--  test/953-invoke-polymorphic-compiler/expected.txt | 25
-rw-r--r--  test/953-invoke-polymorphic-compiler/info.txt | 3
-rwxr-xr-x  test/953-invoke-polymorphic-compiler/run | 20
-rw-r--r--  test/953-invoke-polymorphic-compiler/src/Main.java | 374
-rw-r--r--  test/957-methodhandle-transforms/src/Main.java | 103
-rw-r--r--  test/Android.bp | 1
-rw-r--r--  test/Android.run-test.mk | 21
-rw-r--r--  test/common/runtime_state.cc | 5
-rwxr-xr-x  test/etc/run-test-jar | 44
-rwxr-xr-x  test/run-test | 2
-rw-r--r--  test/ti-agent/common_helper.cc | 107
-rw-r--r--  test/ti-agent/common_load.cc | 65
179 files changed, 7042 insertions, 3086 deletions
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index bbf9eee0e5..e2a0942724 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -176,6 +176,7 @@ class LinkerPatch {
kCallRelative, // NOTE: Actual patching is instruction_set-dependent.
kType,
kTypeRelative, // NOTE: Actual patching is instruction_set-dependent.
+ kTypeBssEntry, // NOTE: Actual patching is instruction_set-dependent.
kString,
kStringRelative, // NOTE: Actual patching is instruction_set-dependent.
kStringBssEntry, // NOTE: Actual patching is instruction_set-dependent.
@@ -228,6 +229,16 @@ class LinkerPatch {
return patch;
}
+ static LinkerPatch TypeBssEntryPatch(size_t literal_offset,
+ const DexFile* target_dex_file,
+ uint32_t pc_insn_offset,
+ uint32_t target_type_idx) {
+ LinkerPatch patch(literal_offset, Type::kTypeBssEntry, target_dex_file);
+ patch.type_idx_ = target_type_idx;
+ patch.pc_insn_offset_ = pc_insn_offset;
+ return patch;
+ }
+
static LinkerPatch StringPatch(size_t literal_offset,
const DexFile* target_dex_file,
uint32_t target_string_idx) {
@@ -282,6 +293,7 @@ class LinkerPatch {
switch (GetType()) {
case Type::kCallRelative:
case Type::kTypeRelative:
+ case Type::kTypeBssEntry:
case Type::kStringRelative:
case Type::kStringBssEntry:
case Type::kDexCacheArray:
@@ -299,12 +311,16 @@ class LinkerPatch {
}
const DexFile* TargetTypeDexFile() const {
- DCHECK(patch_type_ == Type::kType || patch_type_ == Type::kTypeRelative);
+ DCHECK(patch_type_ == Type::kType ||
+ patch_type_ == Type::kTypeRelative ||
+ patch_type_ == Type::kTypeBssEntry);
return target_dex_file_;
}
dex::TypeIndex TargetTypeIndex() const {
- DCHECK(patch_type_ == Type::kType || patch_type_ == Type::kTypeRelative);
+ DCHECK(patch_type_ == Type::kType ||
+ patch_type_ == Type::kTypeRelative ||
+ patch_type_ == Type::kTypeBssEntry);
return dex::TypeIndex(type_idx_);
}
@@ -334,6 +350,7 @@ class LinkerPatch {
uint32_t PcInsnOffset() const {
DCHECK(patch_type_ == Type::kTypeRelative ||
+ patch_type_ == Type::kTypeBssEntry ||
patch_type_ == Type::kStringRelative ||
patch_type_ == Type::kStringBssEntry ||
patch_type_ == Type::kDexCacheArray);
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 4a9de7f3d1..79e1785e91 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -224,6 +224,7 @@ void Arm64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
} else {
// LDR/STR 32-bit or 64-bit with imm12 == 0 (unset).
DCHECK(patch.GetType() == LinkerPatch::Type::kDexCacheArray ||
+ patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
DCHECK_EQ(insn & 0xbfbffc00, 0xb9000000) << std::hex << insn;
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index a9da09c82c..de5af97b9a 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -296,6 +296,7 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings, ProfileCo
bss_start_(0u),
bss_size_(0u),
bss_roots_offset_(0u),
+ bss_type_entries_(),
bss_string_entries_(),
oat_data_offset_(0u),
oat_header_(nullptr),
@@ -585,7 +586,7 @@ void OatWriter::PrepareLayout(linker::MultiOatRelativePatcher* relative_patcher)
}
oat_size_ = offset;
- if (!HasBootImage()) {
+ {
TimingLogger::ScopedTiming split("InitBssLayout", timings_);
InitBssLayout(instruction_set);
}
@@ -847,6 +848,10 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
if (!patch.IsPcRelative()) {
writer_->absolute_patch_locations_.push_back(base_loc + patch.LiteralOffset());
}
+ if (patch.GetType() == LinkerPatch::Type::kTypeBssEntry) {
+ TypeReference ref(patch.TargetTypeDexFile(), patch.TargetTypeIndex());
+ writer_->bss_type_entries_.Overwrite(ref, /* placeholder */ 0u);
+ }
if (patch.GetType() == LinkerPatch::Type::kStringBssEntry) {
StringReference ref(patch.TargetStringDexFile(), patch.TargetStringIndex());
writer_->bss_string_entries_.Overwrite(ref, /* placeholder */ 0u);
@@ -1185,6 +1190,15 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
target_offset);
break;
}
+ case LinkerPatch::Type::kTypeBssEntry: {
+ TypeReference ref(patch.TargetTypeDexFile(), patch.TargetTypeIndex());
+ uint32_t target_offset = writer_->bss_type_entries_.Get(ref);
+ writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
+ patch,
+ offset_ + literal_offset,
+ target_offset);
+ break;
+ }
case LinkerPatch::Type::kCall: {
uint32_t target_offset = GetTargetOffset(patch);
PatchCodeAddress(&patched_code_, literal_offset, target_offset);
@@ -1619,20 +1633,34 @@ size_t OatWriter::InitOatCodeDexFiles(size_t offset) {
}
void OatWriter::InitBssLayout(InstructionSet instruction_set) {
- DCHECK(!HasBootImage());
+ if (HasBootImage()) {
+ DCHECK(bss_string_entries_.empty());
+ if (bss_type_entries_.empty()) {
+ // Nothing to put to the .bss section.
+ return;
+ }
+ }
// Allocate space for app dex cache arrays in the .bss section.
bss_start_ = RoundUp(oat_size_, kPageSize);
- PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set);
bss_size_ = 0u;
- for (const DexFile* dex_file : *dex_files_) {
- dex_cache_arrays_offsets_.Put(dex_file, bss_start_ + bss_size_);
- DexCacheArraysLayout layout(pointer_size, dex_file);
- bss_size_ += layout.Size();
+ if (!HasBootImage()) {
+ PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set);
+ for (const DexFile* dex_file : *dex_files_) {
+ dex_cache_arrays_offsets_.Put(dex_file, bss_start_ + bss_size_);
+ DexCacheArraysLayout layout(pointer_size, dex_file);
+ bss_size_ += layout.Size();
+ }
}
bss_roots_offset_ = bss_size_;
+ // Prepare offsets for .bss Class entries.
+ for (auto& entry : bss_type_entries_) {
+ DCHECK_EQ(entry.second, 0u);
+ entry.second = bss_start_ + bss_size_;
+ bss_size_ += sizeof(GcRoot<mirror::Class>);
+ }
// Prepare offsets for .bss String entries.
for (auto& entry : bss_string_entries_) {
DCHECK_EQ(entry.second, 0u);
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 8d087f4f91..db84166ad3 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -31,6 +31,7 @@
#include "os.h"
#include "safe_map.h"
#include "string_reference.h"
+#include "utils/type_reference.h"
namespace art {
@@ -372,6 +373,11 @@ class OatWriter {
// The offset of the GC roots in .bss section.
size_t bss_roots_offset_;
+ // Map for allocating Class entries in .bss. Indexed by TypeReference for the source
+ // type in the dex file with the "type value comparator" for deduplication. The value
+ // is the target offset for patching, starting at `bss_start_ + bss_roots_offset_`.
+ SafeMap<TypeReference, size_t, TypeReferenceValueComparator> bss_type_entries_;
+
// Map for allocating String entries in .bss. Indexed by StringReference for the source
// string in the dex file with the "string value comparator" for deduplication. The value
// is the target offset for patching, starting at `bss_start_ + bss_roots_offset_`.
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8ee0a6bbdb..70c2738010 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -367,6 +367,12 @@ void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invok
InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}
+void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) {
+ MoveConstant(invoke->GetLocations()->GetTemp(0), static_cast<int32_t>(invoke->GetType()));
+ QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
+ InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
+}
+
void CodeGenerator::CreateUnresolvedFieldLocationSummary(
HInstruction* field_access,
Primitive::Type field_type,
@@ -491,31 +497,34 @@ void CodeGenerator::GenerateUnresolvedFieldAccess(
}
}
-// TODO: Remove argument `code_generator_supports_read_barrier` when
-// all code generators have read barrier support.
-void CodeGenerator::CreateLoadClassLocationSummary(HLoadClass* cls,
- Location runtime_type_index_location,
- Location runtime_return_location,
- bool code_generator_supports_read_barrier) {
- ArenaAllocator* allocator = cls->GetBlock()->GetGraph()->GetArena();
- LocationSummary::CallKind call_kind = cls->NeedsAccessCheck()
- ? LocationSummary::kCallOnMainOnly
- : (((code_generator_supports_read_barrier && kEmitCompilerReadBarrier) ||
- cls->CanCallRuntime())
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall);
- LocationSummary* locations = new (allocator) LocationSummary(cls, call_kind);
+void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
+ Location runtime_type_index_location,
+ Location runtime_return_location) {
+ DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod);
+ DCHECK_EQ(cls->InputCount(), 1u);
+ LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetArena()) LocationSummary(
+ cls, LocationSummary::kCallOnMainOnly);
+ locations->SetInAt(0, Location::NoLocation());
+ locations->AddTemp(runtime_type_index_location);
+ locations->SetOut(runtime_return_location);
+}
+
+void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
+ DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod);
+ LocationSummary* locations = cls->GetLocations();
+ MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
if (cls->NeedsAccessCheck()) {
- locations->SetInAt(0, Location::NoLocation());
- locations->AddTemp(runtime_type_index_location);
- locations->SetOut(runtime_return_location);
+ CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+ InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
+ } else if (cls->MustGenerateClinitCheck()) {
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ InvokeRuntime(kQuickInitializeStaticStorage, cls, cls->GetDexPc());
} else {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ InvokeRuntime(kQuickInitializeType, cls, cls->GetDexPc());
}
}
-
void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
// The DCHECKS below check that a register is not specified twice in
// the summary. The out location can overlap with an input, so we need
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 6366b9838f..38d532e1e9 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -426,12 +426,12 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
}
- // Perfoms checks pertaining to an InvokeRuntime call.
+ // Performs checks pertaining to an InvokeRuntime call.
void ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
SlowPathCode* slow_path);
- // Perfoms checks pertaining to an InvokeRuntimeWithoutRecordingPcInfo call.
+ // Performs checks pertaining to an InvokeRuntimeWithoutRecordingPcInfo call.
static void ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
SlowPathCode* slow_path);
@@ -495,6 +495,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);
+ void GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke);
+
void CreateUnresolvedFieldLocationSummary(
HInstruction* field_access,
Primitive::Type field_type,
@@ -507,11 +509,10 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
uint32_t dex_pc,
const FieldAccessCallingConvention& calling_convention);
- // TODO: This overlaps a bit with MoveFromReturnRegister. Refactor for a better design.
- static void CreateLoadClassLocationSummary(HLoadClass* cls,
- Location runtime_type_index_location,
- Location runtime_return_location,
- bool code_generator_supports_read_barrier = false);
+ static void CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
+ Location runtime_type_index_location,
+ Location runtime_return_location);
+ void GenerateLoadClassRuntimeCall(HLoadClass* cls);
static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke);
@@ -521,7 +522,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
virtual void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path) = 0;
+ SlowPathCode* slow_path = nullptr) = 0;
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 3009103ac7..ef4bd1e59d 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -371,22 +371,23 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCodeARM(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeARM(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- arm_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ arm_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -400,6 +401,23 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the .bss entry address in the fast path,
+ // so that we can avoid another calculation here.
+ CodeGeneratorARM::PcRelativePatchInfo* labels =
+ arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ __ BindTrackedLabel(&labels->movw_label);
+ __ movw(IP, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->movt_label);
+ __ movt(IP, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->add_pc_label);
+ __ add(IP, IP, ShifterOperand(PC));
+ __ str(locations->Out().AsRegister<Register>(), Address(IP));
+ }
__ b(GetExitLabel());
}
@@ -409,10 +427,6 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -430,7 +444,7 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = load->GetStringIndex().index_;
+ const dex::StringIndex string_index = load->GetStringIndex();
Register out = locations->Out().AsRegister<Register>();
Register temp = locations->GetTemp(0).AsRegister<Register>();
constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
@@ -449,7 +463,7 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
__ mov(entry_address, ShifterOperand(temp));
}
- __ LoadImmediate(calling_convention.GetRegisterAt(0), string_index);
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), string_index.index_);
arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -1208,6 +1222,7 @@ CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
@@ -2370,6 +2385,14 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderARM::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
void LocationsBuilderARM::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -3936,7 +3959,6 @@ void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
} else {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(Location::RegisterLocation(R0));
}
@@ -3954,7 +3976,7 @@ void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
@@ -5709,17 +5731,11 @@ HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kJitTableAddress:
- break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
- // We disable pc-relative load when there is an irreducible loop, as the optimization
- // is incompatible with it.
- // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- if (GetGraph()->HasIrreducibleLoops()) {
- return HLoadClass::LoadKind::kDexCacheViaMethod;
- }
+ break;
+ case HLoadClass::LoadKind::kJitTableAddress:
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
@@ -5728,15 +5744,16 @@ HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
}
void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(R0),
- /* code_generator_supports_read_barrier */ true);
+ Location::RegisterLocation(R0));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -5747,24 +5764,21 @@ void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod ||
- load_kind == HLoadClass::LoadKind::kDexCachePcRelative) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
@@ -5772,7 +5786,7 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -5786,12 +5800,14 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ LoadLiteral(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
CodeGeneratorARM::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
@@ -5810,6 +5826,19 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ CodeGeneratorARM::PcRelativePatchInfo* labels =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ __ BindTrackedLabel(&labels->movw_label);
+ __ movw(out, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->movt_label);
+ __ movt(out, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->add_pc_label);
+ __ add(out, out, ShifterOperand(PC));
+ GenerateGcRootFieldLoad(cls, out_loc, out, 0, kCompilerReadBarrierOption);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
__ LoadLiteral(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
@@ -5818,28 +5847,9 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- Register base_reg = locations->InAt(0).AsRegister<Register>();
- HArmDexCacheArraysBase* base = cls->InputAt(0)->AsArmDexCacheArraysBase();
- int32_t offset = cls->GetDexCacheElementOffset() - base->GetElementOffset();
- // /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- Register current_method = locations->InAt(0).AsRegister<Register>();
- __ LoadFromOffset(kLoadWord,
- out,
- current_method,
- ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- }
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -5947,6 +5957,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) NO_THREAD_S
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out, codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
load->GetStringIndex()));
return; // No dex cache slow path.
@@ -5954,7 +5965,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) NO_THREAD_S
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorARM::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
__ BindTrackedLabel(&labels->movw_label);
__ movw(out, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->movt_label);
@@ -5974,7 +5985,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) NO_THREAD_S
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
Register temp = locations->GetTemp(0).AsRegister<Register>();
CodeGeneratorARM::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
__ BindTrackedLabel(&labels->movw_label);
__ movw(temp, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->movt_label);
@@ -7280,8 +7291,8 @@ void CodeGeneratorARM::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp
}
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeStringPatch(
- const DexFile& dex_file, uint32_t string_index) {
- return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeTypePatch(
@@ -7289,6 +7300,11 @@ CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeTypePatch(
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
}
+CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewTypeBssEntryPatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
+}
+
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -7367,6 +7383,7 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
/* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
boot_image_address_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
@@ -7381,12 +7398,17 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
target_string.string_index.index_));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
Literal* literal = entry.second;
@@ -7396,8 +7418,6 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
target_type.dex_file,
target_type.type_index.index_));
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
- linker_patches);
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
Literal* literal = entry.second;
@@ -7405,6 +7425,7 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
uint32_t literal_offset = literal->GetLabel()->Position();
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
+ DCHECK_EQ(size, linker_patches->size());
}
Literal* CodeGeneratorARM::DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map) {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index d5968e0764..bd237e96a5 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -481,8 +481,10 @@ class CodeGeneratorARM : public CodeGenerator {
Label add_pc_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
@@ -635,8 +637,10 @@ class CodeGeneratorARM : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
TypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 494c281590..8c954e3d7c 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -276,22 +276,23 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCodeARM64(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeARM64(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ Mov(calling_convention.GetRegisterAt(0).W(), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- arm64_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ arm64_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -302,11 +303,32 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
- Primitive::Type type = at_->GetType();
+ Primitive::Type type = instruction_->GetType();
arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
-
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ UseScratchRegisterScope temps(arm64_codegen->GetVIXLAssembler());
+ Register temp = temps.AcquireX();
+ const DexFile& dex_file = cls_->GetDexFile();
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the ADRP in the fast path, so that we
+ // can avoid the ADRP here.
+ vixl::aarch64::Label* adrp_label =
+ arm64_codegen->NewBssEntryTypePatch(dex_file, type_index);
+ arm64_codegen->EmitAdrpPlaceholder(adrp_label, temp);
+ vixl::aarch64::Label* strp_label =
+ arm64_codegen->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
+ {
+ SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
+ __ Bind(strp_label);
+ __ str(RegisterFrom(locations->Out(), Primitive::kPrimNot),
+ MemOperand(temp, /* offset placeholder */ 0));
+ }
+ }
__ B(GetExitLabel());
}
@@ -316,10 +338,6 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -349,8 +367,8 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
- __ Mov(calling_convention.GetRegisterAt(0).W(), string_index);
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
+ __ Mov(calling_convention.GetRegisterAt(0).W(), string_index.index_);
arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
Primitive::Type type = instruction_->GetType();
@@ -1154,6 +1172,7 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
@@ -4080,11 +4099,20 @@ void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location te
__ Blr(lr);
}
+void LocationsBuilderARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeStringPatch(
const DexFile& dex_file,
- uint32_t string_index,
+ dex::StringIndex string_index,
vixl::aarch64::Label* adrp_label) {
- return NewPcRelativePatch(dex_file, string_index, adrp_label, &pc_relative_string_patches_);
+ return
+ NewPcRelativePatch(dex_file, string_index.index_, adrp_label, &pc_relative_string_patches_);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeTypePatch(
@@ -4094,6 +4122,13 @@ vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeTypePatch(
return NewPcRelativePatch(dex_file, type_index.index_, adrp_label, &pc_relative_type_patches_);
}
+vixl::aarch64::Label* CodeGeneratorARM64::NewBssEntryTypePatch(
+ const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ vixl::aarch64::Label* adrp_label) {
+ return NewPcRelativePatch(dex_file, type_index.index_, adrp_label, &type_bss_entry_patches_);
+}
+
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file,
uint32_t element_offset,
@@ -4200,6 +4235,7 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
pc_relative_type_patches_.size() +
+ type_bss_entry_patches_.size() +
boot_image_address_patches_.size();
linker_patches->reserve(size);
for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
@@ -4216,12 +4252,17 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
target_string.string_index.index_));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
vixl::aarch64::Literal<uint32_t>* literal = entry.second;
@@ -4229,13 +4270,12 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
target_type.dex_file,
target_type.type_index.index_));
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
- linker_patches);
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
vixl::aarch64::Literal<uint32_t>* literal = entry.second;
linker_patches->push_back(LinkerPatch::RecordPosition(literal->GetOffset()));
}
+ DCHECK_EQ(size, linker_patches->size());
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value,
@@ -4298,12 +4338,12 @@ HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
+ case HLoadClass::LoadKind::kBssEntry:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJitCompilation());
- break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -4311,15 +4351,16 @@ HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
}
void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
LocationFrom(calling_convention.GetRegisterAt(0)),
- LocationFrom(vixl::aarch64::x0),
- /* code_generator_supports_read_barrier */ true);
+ LocationFrom(vixl::aarch64::x0));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -4330,21 +4371,19 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(cls->GetLocations()->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
Location out_loc = cls->GetLocations()->Out();
Register out = OutputRegister(cls);
@@ -4353,7 +4392,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -4391,55 +4430,40 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
__ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(cls->GetAddress()));
break;
}
- case HLoadClass::LoadKind::kJitTableAddress: {
- __ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
- cls->GetTypeIndex(),
- cls->GetAddress()));
- GenerateGcRootFieldLoad(cls,
- out_loc,
- out.X(),
- /* offset */ 0,
- /* fixup_label */ nullptr,
- kCompilerReadBarrierOption);
- break;
- }
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- // Add ADRP with its PC-relative DexCache access patch.
+ case HLoadClass::LoadKind::kBssEntry: {
+ // Add ADRP with its PC-relative Class .bss entry patch.
const DexFile& dex_file = cls->GetDexFile();
- uint32_t element_offset = cls->GetDexCacheElementOffset();
- vixl::aarch64::Label* adrp_label =
- codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
+ dex::TypeIndex type_index = cls->GetTypeIndex();
+ vixl::aarch64::Label* adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
- // Add LDR with its PC-relative DexCache access patch.
+ // Add LDR with its PC-relative Class patch.
vixl::aarch64::Label* ldr_label =
- codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
+ codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
// /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
GenerateGcRootFieldLoad(cls,
- out_loc,
+ cls->GetLocations()->Out(),
out.X(),
- /* offset placeholder */ 0,
+ /* placeholder */ 0u,
ldr_label,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
+ kCompilerReadBarrierOption);
+ generate_null_check = true;
break;
}
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- MemberOffset resolved_types_offset =
- ArtMethod::DexCacheResolvedTypesOffset(kArm64PointerSize);
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- Register current_method = InputRegisterAt(cls, 0);
- __ Ldr(out.X(), MemOperand(current_method, resolved_types_offset.Int32Value()));
- // /* GcRoot<mirror::Class> */ out = out[type_index]
+ case HLoadClass::LoadKind::kJitTableAddress: {
+ __ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
+ cls->GetTypeIndex(),
+ cls->GetAddress()));
GenerateGcRootFieldLoad(cls,
out_loc,
out.X(),
- CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_),
+ /* offset */ 0,
/* fixup_label */ nullptr,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
+ kCompilerReadBarrierOption);
break;
}
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -4494,11 +4518,11 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ break;
}
return desired_string_load_kind;
}
@@ -4542,7 +4566,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
// Add ADRP with its PC-relative String patch.
const DexFile& dex_file = load->GetDexFile();
- uint32_t string_index = load->GetStringIndex().index_;
+ const dex::StringIndex string_index = load->GetStringIndex();
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
@@ -4562,7 +4586,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
case HLoadString::LoadKind::kBssEntry: {
// Add ADRP with its PC-relative String .bss entry patch.
const DexFile& dex_file = load->GetDexFile();
- uint32_t string_index = load->GetStringIndex().index_;
+ const dex::StringIndex string_index = load->GetStringIndex();
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
Register temp = temps.AcquireX();
@@ -4744,7 +4768,6 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(LocationFrom(kArtMethodRegister));
} else {
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -4762,7 +4785,7 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction)
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index d6a5f9d1fa..c7a06145e4 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -540,7 +540,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
// ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
// to the associated ADRP patch label).
vixl::aarch64::Label* NewPcRelativeStringPatch(const DexFile& dex_file,
- uint32_t string_index,
+ dex::StringIndex string_index,
vixl::aarch64::Label* adrp_label = nullptr);
// Add a new PC-relative type patch for an instruction and return the label
@@ -551,6 +551,14 @@ class CodeGeneratorARM64 : public CodeGenerator {
dex::TypeIndex type_index,
vixl::aarch64::Label* adrp_label = nullptr);
+ // Add a new .bss entry type patch for an instruction and return the label
+ // to be bound before the instruction. The instruction will be either the
+ // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
+ // to the associated ADRP patch label).
+ vixl::aarch64::Label* NewBssEntryTypePatch(const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ vixl::aarch64::Label* adrp_label = nullptr);
+
// Add a new PC-relative dex cache array patch for an instruction and return
// the label to be bound before the instruction. The instruction will be
// either the ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label`
@@ -744,8 +752,10 @@ class CodeGeneratorARM64 : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
TypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
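Aside: the header change above adds a NewBssEntryTypePatch() helper and a type_bss_entry_patches_ deque alongside the existing boot-image type patches. The following is a minimal sketch, with simplified stand-in types rather than ART's real PcRelativePatchInfo/LinkerPatch classes, of the record-then-emit bookkeeping this plugs into: recording only appends to the per-kind deque, and EmitLinkerPatches() later converts every recorded entry into a linker patch, checking that the reserved size was exact.

#include <cassert>
#include <cstdint>
#include <deque>
#include <vector>

struct PcRelativePatchInfo {
  uint32_t target_index;   // stands in for type_index.index_
  uint32_t label_offset;   // where the ADRP/ADD (or MOVW/MOVT) pair lives
};

struct LinkerPatch {
  enum class Kind { kRelativeType, kTypeBssEntry };
  Kind kind;
  uint32_t literal_offset;
  uint32_t target_index;
};

class PatchRecorder {
 public:
  PcRelativePatchInfo* NewPcRelativeTypePatch(uint32_t type_index) {
    return NewPcRelativePatch(type_index, &pc_relative_type_patches_);
  }
  PcRelativePatchInfo* NewTypeBssEntryPatch(uint32_t type_index) {
    return NewPcRelativePatch(type_index, &type_bss_entry_patches_);
  }
  void EmitLinkerPatches(std::vector<LinkerPatch>* linker_patches, bool boot_image) const {
    const size_t size = (boot_image ? pc_relative_type_patches_.size() : 0u) +
                        type_bss_entry_patches_.size();
    linker_patches->reserve(size);
    if (boot_image) {
      Emit(LinkerPatch::Kind::kRelativeType, pc_relative_type_patches_, linker_patches);
    } else {
      assert(pc_relative_type_patches_.empty());  // mirrors the DCHECK added in the diff
    }
    Emit(LinkerPatch::Kind::kTypeBssEntry, type_bss_entry_patches_, linker_patches);
    assert(size == linker_patches->size());       // mirrors DCHECK_EQ(size, linker_patches->size())
  }

 private:
  static PcRelativePatchInfo* NewPcRelativePatch(uint32_t index,
                                                 std::deque<PcRelativePatchInfo>* patches) {
    patches->push_back({index, /* label_offset */ 0u});
    return &patches->back();
  }
  static void Emit(LinkerPatch::Kind kind,
                   const std::deque<PcRelativePatchInfo>& patches,
                   std::vector<LinkerPatch>* out) {
    for (const PcRelativePatchInfo& info : patches) {
      out->push_back({kind, info.label_offset, info.target_index});
    }
  }

  std::deque<PcRelativePatchInfo> pc_relative_type_patches_;  // kBootImageLinkTimePcRelative
  std::deque<PcRelativePatchInfo> type_bss_entry_patches_;    // kBssEntry
};

int main() {
  PatchRecorder recorder;
  recorder.NewTypeBssEntryPatch(/* type_index */ 42u);
  std::vector<LinkerPatch> patches;
  recorder.EmitLinkerPatches(&patches, /* boot_image */ false);
  assert(patches.size() == 1u && patches[0].kind == LinkerPatch::Kind::kTypeBssEntry);
  return 0;
}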
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index b1f6d599ab..83c289e866 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -394,22 +394,23 @@ class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
public:
LoadClassSlowPathARMVIXL(HLoadClass* cls, HInstruction* at, uint32_t dex_pc, bool do_clinit)
- : SlowPathCodeARMVIXL(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeARMVIXL(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- __ Mov(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ Mov(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- arm_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ arm_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -423,6 +424,18 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
arm_codegen->Move32(locations->Out(), LocationFrom(r0));
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the .bss entry address in the fast path,
+ // so that we can avoid another calculation here.
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ arm_codegen->EmitMovwMovtPlaceholder(labels, ip);
+ __ Str(OutputRegister(cls_), MemOperand(ip));
+ }
__ B(GetExitLabel());
}
@@ -432,10 +445,6 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -454,7 +463,7 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = load->GetStringIndex().index_;
+ const dex::StringIndex string_index = load->GetStringIndex();
vixl32::Register out = OutputRegister(load);
vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
@@ -473,7 +482,7 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ Mov(entry_address, temp);
}
- __ Mov(calling_convention.GetRegisterAt(0), string_index);
+ __ Mov(calling_convention.GetRegisterAt(0), string_index.index_);
arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -1252,6 +1261,7 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
@@ -2445,6 +2455,14 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv
}
}
+void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -3948,7 +3966,6 @@ void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) {
} else {
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(LocationFrom(r0));
}
@@ -3970,7 +3987,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
@@ -5790,17 +5807,11 @@ HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kJitTableAddress:
- break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
- // We disable pc-relative load when there is an irreducible loop, as the optimization
- // is incompatible with it.
- // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- if (GetGraph()->HasIrreducibleLoops()) {
- return HLoadClass::LoadKind::kDexCacheViaMethod;
- }
+ break;
+ case HLoadClass::LoadKind::kJitTableAddress:
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
@@ -5809,15 +5820,16 @@ HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
}
void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
LocationFrom(calling_convention.GetRegisterAt(0)),
- LocationFrom(r0),
- /* code_generator_supports_read_barrier */ true);
+ LocationFrom(r0));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -5828,24 +5840,21 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod ||
- load_kind == HLoadClass::LoadKind::kDexCachePcRelative) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
vixl32::Register out = OutputRegister(cls);
@@ -5853,7 +5862,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -5867,12 +5876,14 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ Ldr(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
@@ -5886,6 +5897,14 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
__ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ GenerateGcRootFieldLoad(cls, out_loc, out, 0, kCompilerReadBarrierOption);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
@@ -5894,30 +5913,9 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- vixl32::Register base_reg = InputRegisterAt(cls, 0);
- HArmDexCacheArraysBase* base = cls->InputAt(0)->AsArmDexCacheArraysBase();
- int32_t offset = cls->GetDexCacheElementOffset() - base->GetElementOffset();
- // /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- vixl32::Register current_method = InputRegisterAt(cls, 0);
- const int32_t resolved_types_offset =
- ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value();
- GetAssembler()->LoadFromOffset(kLoadWord, out, current_method, resolved_types_offset);
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- default:
- TODO_VIXL32(FATAL);
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -6039,7 +6037,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitMovwMovtPlaceholder(labels, out);
return; // No dex cache slow path.
}
@@ -6054,7 +6052,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitMovwMovtPlaceholder(labels, temp);
GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
@@ -7398,8 +7396,8 @@ void CodeGeneratorARMVIXL::GenerateVirtualCall(HInvokeVirtual* invoke, Location
}
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeStringPatch(
- const DexFile& dex_file, uint32_t string_index) {
- return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeTypePatch(
@@ -7407,6 +7405,11 @@ CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeTy
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
}
+CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewTypeBssEntryPatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
+}
+
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -7500,6 +7503,7 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
/* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
boot_image_address_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
@@ -7514,12 +7518,17 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
target_string.string_index.index_));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
VIXLUInt32Literal* literal = entry.second;
@@ -7529,8 +7538,6 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
target_type.dex_file,
target_type.type_index.index_));
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
- linker_patches);
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
VIXLUInt32Literal* literal = entry.second;
@@ -7538,6 +7545,7 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
uint32_t literal_offset = literal->GetLocation();
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
+ DCHECK_EQ(size, linker_patches->size());
}
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal(
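Aside: the ARM VIXL hunks above add the kBssEntry case (movw/movt placeholder, GC-root load, generate_null_check = true) plus the store back to the .bss slot in the slow path. A conceptual, self-contained sketch of that fast/slow-path shape follows; the types and ResolveType() helper are invented stand-ins, not ART code, and only illustrate why the first execution calls the runtime while later executions take a single load.

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>

struct Class { std::string descriptor; };

// Stand-in for the kQuickInitializeType runtime entrypoint.
Class* ResolveType(uint32_t type_index) {
  static std::unordered_map<uint32_t, Class> resolved;
  return &resolved.try_emplace(type_index, Class{"LType" + std::to_string(type_index) + ";"})
              .first->second;
}

// One .bss slot per (dex file, type index); it stays null until the class is
// resolved once, after which every later execution takes the fast path.
Class* LoadClassBssEntry(Class** bss_slot, uint32_t type_index) {
  Class* klass = *bss_slot;            // fast path: one PC-relative load
  if (klass == nullptr) {              // generate_null_check = true in the diff
    klass = ResolveType(type_index);   // slow path: runtime call
    *bss_slot = klass;                 // store back, as the slow paths above now do
  }
  return klass;
}

int main() {
  Class* slot = nullptr;
  Class* first = LoadClassBssEntry(&slot, 7u);
  Class* second = LoadClassBssEntry(&slot, 7u);  // no runtime call the second time
  assert(first == second && slot == first);
  return 0;
}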
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 200a463c75..0f0a9540ba 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -562,8 +562,10 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
vixl::aarch32::Label add_pc_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
VIXLUInt32Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
@@ -731,8 +733,10 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
TypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 9af03e8153..1f4ff279e8 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -213,23 +213,24 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCodeMIPS(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeMIPS(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- mips_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ mips_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -240,11 +241,26 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
- Primitive::Type type = at_->GetType();
+ Primitive::Type type = instruction_->GetType();
mips_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the .bss entry address in the fast path,
+ // so that we can avoid another calculation here.
+ bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
+ Register base = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
+ DCHECK_NE(out.AsRegister<Register>(), AT);
+ CodeGeneratorMIPS::PcRelativePatchInfo* info =
+ mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ mips_codegen->EmitPcRelativeAddressPlaceholder(info, TMP, base);
+ __ StoreToOffset(kStoreWord, out.AsRegister<Register>(), TMP, 0);
+ }
__ B(GetExitLabel());
}
@@ -254,10 +270,6 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -281,8 +293,8 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
InvokeRuntimeCallingConvention calling_convention;
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = load->GetStringIndex().index_;
- __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
+ const dex::StringIndex string_index = load->GetStringIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
Primitive::Type type = instruction_->GetType();
@@ -465,6 +477,7 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
clobbered_ra_(false) {
@@ -1007,6 +1020,7 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patch
pc_relative_dex_cache_patches_.size() +
pc_relative_string_patches_.size() +
pc_relative_type_patches_.size() +
+ type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
boot_image_type_patches_.size() +
boot_image_address_patches_.size();
@@ -1014,13 +1028,16 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patch
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
for (const auto& entry : boot_image_string_patches_) {
const StringReference& target_string = entry.first;
@@ -1047,11 +1064,12 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patch
uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
+ DCHECK_EQ(size, linker_patches->size());
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeStringPatch(
- const DexFile& dex_file, uint32_t string_index) {
- return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeTypePatch(
@@ -1059,6 +1077,11 @@ CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeTypePatc
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
}
+CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewTypeBssEntryPatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
+}
+
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -5154,6 +5177,14 @@ void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invo
}
}
+void LocationsBuilderMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
if (invoke->GetLocations()->Intrinsified()) {
IntrinsicCodeGeneratorMIPS intrinsic(codegen);
@@ -5186,14 +5217,14 @@ HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- fallback_load = false;
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
// TODO: implement.
fallback_load = true;
break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ fallback_load = false;
+ break;
}
if (fallback_load) {
desired_string_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
@@ -5222,15 +5253,13 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
+ case HLoadClass::LoadKind::kBssEntry:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
fallback_load = true;
break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJitCompilation());
- // TODO: Create as many MipsDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
fallback_load = false;
break;
@@ -5427,34 +5456,32 @@ void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
}
void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(V0),
- /* code_generator_supports_read_barrier */ false); // TODO: revisit this bool.
+ Location::RegisterLocation(V0));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadClass::LoadKind::kBootImageAddress:
+ case HLoadClass::LoadKind::kBssEntry:
if (codegen_->GetInstructionSetFeatures().IsR6()) {
break;
}
FALLTHROUGH_INTENDED;
- // We need an extra register for PC-relative dex cache accesses.
- case HLoadClass::LoadKind::kDexCachePcRelative:
case HLoadClass::LoadKind::kReferrersClass:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
locations->SetInAt(0, Location::RequiresRegister());
break;
default:
@@ -5464,15 +5491,14 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
}
void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
Register base_or_current_method_reg;
@@ -5480,12 +5506,11 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadClass::LoadKind::kBootImageAddress:
+ case HLoadClass::LoadKind::kBssEntry:
base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
break;
- // We need an extra register for PC-relative dex cache accesses.
- case HLoadClass::LoadKind::kDexCachePcRelative:
case HLoadClass::LoadKind::kReferrersClass:
case HLoadClass::LoadKind::kDexCacheViaMethod:
base_or_current_method_reg = locations->InAt(0).AsRegister<Register>();
@@ -5508,14 +5533,14 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
@@ -5530,31 +5555,21 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
- case HLoadClass::LoadKind::kJitTableAddress: {
- LOG(FATAL) << "Unimplemented";
+ case HLoadClass::LoadKind::kBssEntry: {
+ CodeGeneratorMIPS::PcRelativePatchInfo* info =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
+ __ LoadFromOffset(kLoadWord, out, out, 0);
+ generate_null_check = true;
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- HMipsDexCacheArraysBase* base = cls->InputAt(0)->AsMipsDexCacheArraysBase();
- int32_t offset =
- cls->GetDexCacheElementOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset;
- // /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(cls, out_loc, base_or_current_method_reg, offset);
- generate_null_check = !cls->IsInDexCache();
+ case HLoadClass::LoadKind::kJitTableAddress: {
+ LOG(FATAL) << "Unimplemented";
break;
}
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- __ LoadFromOffset(kLoadWord,
- out,
- base_or_current_method_reg,
- ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset);
- generate_null_check = !cls->IsInDexCache();
- }
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -5649,6 +5664,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
@@ -5657,7 +5673,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
return; // No dex cache slow path.
}
@@ -5673,7 +5689,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
__ LoadFromOffset(kLoadWord, out, out, 0);
SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
@@ -5903,7 +5919,6 @@ void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -5920,7 +5935,7 @@ void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
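Aside: each backend's LoadClassSlowPath above drops its separate at_ field and guards the new .bss store on the slow path belonging to the HLoadClass itself. A small sketch of that guard follows, using simplified stand-in classes rather than ART's real HInstruction hierarchy; the point is that a ClinitCheck user sharing the slow path must not write the slot.

#include <cassert>

enum class LoadKind { kBssEntry, kBootImageAddress, kReferrersClass };

struct HInstruction {
  virtual ~HInstruction() = default;
  virtual bool IsLoadClass() const { return false; }
};

struct HLoadClass : HInstruction {
  explicit HLoadClass(LoadKind kind) : load_kind(kind) {}
  bool IsLoadClass() const override { return true; }
  LoadKind load_kind;
};

struct HClinitCheck : HInstruction {};  // a user of HLoadClass that shares the slow path

class LoadClassSlowPath {
 public:
  // No separate at_ member: the base slow path already keeps the instruction
  // it was created for, which is either the HLoadClass or a ClinitCheck.
  LoadClassSlowPath(HLoadClass* cls, HInstruction* at) : cls_(cls), instruction_(at) {}

  // Mirrors the condition guarding the new .bss store in EmitNativeCode().
  bool ShouldStoreToBssEntry() const {
    assert(instruction_->IsLoadClass() == (cls_ == instruction_));  // the DCHECK_EQ in ART
    return cls_ == instruction_ && cls_->load_kind == LoadKind::kBssEntry;
  }

 private:
  HLoadClass* const cls_;
  HInstruction* const instruction_;
};

int main() {
  HLoadClass load(LoadKind::kBssEntry);
  HClinitCheck clinit_check;
  assert(LoadClassSlowPath(&load, &load).ShouldStoreToBssEntry());
  assert(!LoadClassSlowPath(&load, &clinit_check).ShouldStoreToBssEntry());
  return 0;
}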
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 7b0812cb7b..c8fd325999 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -452,8 +452,10 @@ class CodeGeneratorMIPS : public CodeGenerator {
MipsLabel pc_rel_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
@@ -504,8 +506,10 @@ class CodeGeneratorMIPS : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
BootTypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 318d45528e..8215e1387a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -167,22 +167,23 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCodeMIPS64(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeMIPS64(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- mips64_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ mips64_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -193,11 +194,24 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
- Primitive::Type type = at_->GetType();
+ Primitive::Type type = instruction_->GetType();
mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the .bss entry address in the fast path,
+ // so that we can avoid another calculation here.
+ DCHECK_NE(out.AsRegister<GpuRegister>(), AT);
+ CodeGeneratorMIPS64::PcRelativePatchInfo* info =
+ mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info, AT);
+ __ Sw(out.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ }
__ Bc(GetExitLabel());
}
@@ -207,10 +221,6 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -234,8 +244,8 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
InvokeRuntimeCallingConvention calling_convention;
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
- __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
instruction_->GetDexPc(),
@@ -422,6 +432,7 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Save RA (containing the return address) to mimic Quick.
@@ -922,6 +933,7 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
pc_relative_dex_cache_patches_.size() +
pc_relative_string_patches_.size() +
pc_relative_type_patches_.size() +
+ type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
boot_image_type_patches_.size() +
boot_image_address_patches_.size();
@@ -929,13 +941,16 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
for (const auto& entry : boot_image_string_patches_) {
const StringReference& target_string = entry.first;
@@ -962,11 +977,12 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
+ DCHECK_EQ(size, linker_patches->size());
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeStringPatch(
- const DexFile& dex_file, uint32_t string_index) {
- return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeTypePatch(
@@ -974,6 +990,11 @@ CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeType
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
}
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewTypeBssEntryPatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
+}
+
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -3095,7 +3116,7 @@ void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(
Location root,
GpuRegister obj,
uint32_t offset) {
- // When handling HLoadClass::LoadKind::kDexCachePcRelative, the caller calls
+ // When handling PC-relative loads, the caller calls
// EmitPcRelativeAddressPlaceholderHigh() and then GenerateGcRootFieldLoad().
// The relative patcher expects the two methods to emit the following patchable
// sequence of instructions in this case:
@@ -3256,6 +3277,14 @@ void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* in
HandleInvoke(invoke);
}
+void LocationsBuilderMIPS64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
if (invoke->GetLocations()->Intrinsified()) {
IntrinsicCodeGeneratorMIPS64 intrinsic(codegen);
@@ -3314,14 +3343,14 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
+ case HLoadClass::LoadKind::kBssEntry:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
// TODO: implement.
fallback_load = true;
break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJitCompilation());
- break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -3474,38 +3503,36 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke)
}
void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- calling_convention.GetReturnLocation(Primitive::kPrimNot),
- /* code_generator_supports_read_barrier */ false);
+ calling_convention.GetReturnLocation(Primitive::kPrimNot));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
GpuRegister out = out_loc.AsRegister<GpuRegister>();
GpuRegister current_method_reg = ZERO;
@@ -3526,14 +3553,14 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
ArtMethod::DeclaringClassOffset().Int32Value());
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
@@ -3549,32 +3576,21 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
- case HLoadClass::LoadKind::kJitTableAddress: {
- LOG(FATAL) << "Unimplemented";
- break;
- }
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- uint32_t element_offset = cls->GetDexCacheElementOffset();
+ case HLoadClass::LoadKind::kBssEntry: {
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeDexCacheArrayPatch(cls->GetDexFile(), element_offset);
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
- // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(cls, out_loc, AT, /* placeholder */ 0x5678);
- generate_null_check = !cls->IsInDexCache();
+ __ Lwu(out, AT, /* placeholder */ 0x5678);
+ generate_null_check = true;
break;
}
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- __ LoadFromOffset(kLoadDoubleword,
- out,
- current_method_reg,
- ArtMethod::DexCacheResolvedTypesOffset(kMips64PointerSize).Int32Value());
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset);
- generate_null_check = !cls->IsInDexCache();
+ case HLoadClass::LoadKind::kJitTableAddress: {
+ LOG(FATAL) << "Unimplemented";
+ break;
}
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -3638,6 +3654,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
@@ -3646,7 +3663,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
__ Daddiu(out, AT, /* placeholder */ 0x5678);
return; // No dex cache slow path.
@@ -3663,7 +3680,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
__ Lwu(out, AT, /* placeholder */ 0x5678);
SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
@@ -3844,7 +3861,6 @@ void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -3862,7 +3878,7 @@ void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction)
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
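Aside: the MIPS64 hunks above emit EmitPcRelativeAddressPlaceholderHigh() followed by a load or store with a `/* placeholder */ 0x5678` low half. A rough sketch of the hi/lo arithmetic behind such pairs follows; it is not ART's relative patcher, and the +0x8000 adjustment is an assumption based on how auipc-style pairs are normally fixed up when the second instruction sign-extends its 16-bit immediate (two's complement assumed).

#include <cassert>
#include <cstdint>

struct HiLo { uint16_t hi; uint16_t lo; };

// The +0x8000 carries into the high half whenever the low half would be
// treated as negative by the sign-extending second instruction.
HiLo SplitPcRelativeOffset(int32_t offset) {
  const uint32_t bits = static_cast<uint32_t>(offset);
  return {static_cast<uint16_t>((bits + 0x8000u) >> 16),
          static_cast<uint16_t>(bits & 0xFFFFu)};
}

int32_t Recombine(HiLo parts) {
  const uint32_t high = static_cast<uint32_t>(parts.hi) << 16;
  const int32_t low = static_cast<int16_t>(parts.lo);  // sign-extended, like daddiu/lwu offsets
  return static_cast<int32_t>(high + static_cast<uint32_t>(low));
}

int main() {
  for (int32_t offset : {0, 0x1234, 0x8000, -4, 0x7FFF1234, -0x12345678}) {
    assert(Recombine(SplitPcRelativeOffset(offset)) == offset);
  }
  return 0;
}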
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 8ac919f47e..52b780c106 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -411,8 +411,10 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
Mips64Label pc_rel_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
PcRelativePatchInfo* NewPcRelativeCallPatch(const DexFile& dex_file,
@@ -469,8 +471,10 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
BootTypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8b4f060ef0..726513dfdd 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -225,8 +225,8 @@ class LoadStringSlowPathX86 : public SlowPathCode {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
- __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index));
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index.index_));
x86_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
@@ -254,21 +254,24 @@ class LoadClassSlowPathX86 : public SlowPathCode {
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCode(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex().index_));
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(type_index.index_));
x86_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType,
- at_, dex_pc_, this);
+ instruction_,
+ dex_pc_,
+ this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -281,8 +284,17 @@ class LoadClassSlowPathX86 : public SlowPathCode {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
x86_codegen->Move32(out, Location::RegisterLocation(EAX));
}
-
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ Register method_address = locations->InAt(0).AsRegister<Register>();
+ __ movl(Address(method_address, CodeGeneratorX86::kDummy32BitOffset),
+ locations->Out().AsRegister<Register>());
+ Label* fixup_label = x86_codegen->NewTypeBssEntryPatch(cls_);
+ __ Bind(fixup_label);
+ }
__ jmp(GetExitLabel());
}
@@ -292,10 +304,6 @@ class LoadClassSlowPathX86 : public SlowPathCode {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
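For orientation, the write-back added to this slow path is what makes the new kBssEntry load kind self-populating: the fast path does a single PC-relative load from a .bss word, and only a null result falls back to the runtime, which then fills the slot. A minimal standalone sketch of that behaviour (illustrative only; the names and the static slot are invented — real ART patches a linker-assigned .bss offset rather than using a global pointer):

    #include <cstdio>

    struct Class { const char* name; };

    static Class* bss_slot = nullptr;   // stands in for the linker-allocated .bss entry

    static Class* ResolveClassSlowPath() {
      static Class k = {"LFoo;"};
      return &k;                        // pretend the runtime resolved and initialized the class
    }

    static Class* LoadClass() {
      Class* klass = bss_slot;          // fast path: one PC-relative load in the generated code
      if (klass == nullptr) {           // generate_null_check = true for kBssEntry
        klass = ResolveClassSlowPath(); // slow path: call into the runtime
        bss_slot = klass;               // write the result back, as the slow path above now does
      }
      return klass;
    }

    int main() {
      std::printf("%s\n", LoadClass()->name);  // slow path on first use
      std::printf("%s\n", LoadClass()->name);  // fast path afterwards
      return 0;
    }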
@@ -1009,7 +1017,8 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
constant_area_start_(-1),
@@ -2244,6 +2253,14 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderX86::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
void LocationsBuilderX86::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -4150,7 +4167,6 @@ void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
} else {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
}
@@ -4166,7 +4182,7 @@ void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
DCHECK(!codegen_->IsLeafMethod());
}
}
@@ -4594,9 +4610,15 @@ void CodeGeneratorX86::RecordBootStringPatch(HLoadString* load_string) {
__ Bind(&string_patches_.back().label);
}
-void CodeGeneratorX86::RecordTypePatch(HLoadClass* load_class) {
- type_patches_.emplace_back(load_class->GetDexFile(), load_class->GetTypeIndex().index_);
- __ Bind(&type_patches_.back().label);
+void CodeGeneratorX86::RecordBootTypePatch(HLoadClass* load_class) {
+ boot_image_type_patches_.emplace_back(load_class->GetDexFile(),
+ load_class->GetTypeIndex().index_);
+ __ Bind(&boot_image_type_patches_.back().label);
+}
+
+Label* CodeGeneratorX86::NewTypeBssEntryPatch(HLoadClass* load_class) {
+ type_bss_entry_patches_.emplace_back(load_class->GetDexFile(), load_class->GetTypeIndex().index_);
+ return &type_bss_entry_patches_.back().label;
}
Label* CodeGeneratorX86::NewStringBssEntryPatch(HLoadString* load_string) {
@@ -4633,7 +4655,8 @@ void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
pc_relative_dex_cache_patches_.size() +
simple_patches_.size() +
string_patches_.size() +
- type_patches_.size();
+ boot_image_type_patches_.size() +
+ type_bss_entry_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
@@ -4642,24 +4665,26 @@ void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(boot_image_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
} else if (GetCompilerOptions().GetCompilePic()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(boot_image_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(string_patches_, linker_patches);
} else {
+ for (const PatchInfo<Label>& info : boot_image_type_patches_) {
+ uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
+ linker_patches->push_back(LinkerPatch::TypePatch(literal_offset, &info.dex_file, info.index));
+ }
for (const PatchInfo<Label>& info : string_patches_) {
uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
linker_patches->push_back(
LinkerPatch::StringPatch(literal_offset, &info.dex_file, info.index));
}
}
- if (GetCompilerOptions().GetCompilePic()) {
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(type_patches_, linker_patches);
- } else {
- for (const PatchInfo<Label>& info : type_patches_) {
- uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::TypePatch(literal_offset, &info.dex_file, info.index));
- }
- }
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
+ DCHECK_EQ(size, linker_patches->size());
}
void CodeGeneratorX86::MarkGCCard(Register temp,
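EmitLinkerPatches() keeps the same defensive shape after this change: the expected patch count is summed up front, the output vector is reserved to exactly that size, and the new DCHECK_EQ at the end catches a patch container that was counted but never emitted (or emitted but not counted). A stripped-down sketch of the pattern, with invented container names:

    #include <cassert>
    #include <vector>

    struct Patch { int literal_offset; };

    static void EmitAll(const std::vector<Patch>& string_patches,
                        const std::vector<Patch>& boot_image_type_patches,
                        const std::vector<Patch>& type_bss_entry_patches,
                        std::vector<Patch>* out) {
      // Reserve the exact total so pushing patches never reallocates.
      const size_t size = string_patches.size() +
                          boot_image_type_patches.size() +
                          type_bss_entry_patches.size();
      out->reserve(size);
      for (const Patch& p : string_patches) out->push_back(p);
      for (const Patch& p : boot_image_type_patches) out->push_back(p);
      for (const Patch& p : type_bss_entry_patches) out->push_back(p);
      // If a container is added to the sum above but skipped here (or vice versa),
      // this fails immediately instead of silently dropping linker patches.
      assert(size == out->size());
    }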
@@ -5978,7 +6003,7 @@ HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
DCHECK(GetCompilerOptions().GetCompilePic());
FALLTHROUGH_INTENDED;
- case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation()); // Note: boot image is also non-JIT.
// We disable pc-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
@@ -6000,15 +6025,16 @@ HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
}
void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(EAX),
- /* code_generator_supports_read_barrier */ true);
+ Location::RegisterLocation(EAX));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -6019,11 +6045,9 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod ||
load_kind == HLoadClass::LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == HLoadClass::LoadKind::kDexCachePcRelative) {
+ load_kind == HLoadClass::LoadKind::kBssEntry) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
@@ -6040,14 +6064,14 @@ Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
}
void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
@@ -6055,7 +6079,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -6070,16 +6094,18 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ movl(out, Immediate(/* placeholder */ 0));
- codegen_->RecordTypePatch(cls);
+ codegen_->RecordBootTypePatch(cls);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
Register method_address = locations->InAt(0).AsRegister<Register>();
__ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
- codegen_->RecordTypePatch(cls);
+ codegen_->RecordBootTypePatch(cls);
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
@@ -6090,6 +6116,14 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
codegen_->RecordSimplePatch();
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ Register method_address = locations->InAt(0).AsRegister<Register>();
+ Address address(method_address, CodeGeneratorX86::kDummy32BitOffset);
+ Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset);
Label* fixup_label = codegen_->NewJitRootClassPatch(
@@ -6098,35 +6132,9 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- Register base_reg = locations->InAt(0).AsRegister<Register>();
- uint32_t offset = cls->GetDexCacheElementOffset();
- Label* fixup_label = codegen_->NewPcRelativeDexCacheArrayPatch(cls->GetDexFile(), offset);
- // /* GcRoot<mirror::Class> */ out = *(base + offset) /* PC-relative */
- GenerateGcRootFieldLoad(cls,
- out_loc,
- Address(base_reg, CodeGeneratorX86::kDummy32BitOffset),
- fixup_label,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- Register current_method = locations->InAt(0).AsRegister<Register>();
- __ movl(out, Address(current_method,
- ArtMethod::DexCacheResolvedTypesOffset(kX86PointerSize).Int32Value()));
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- GenerateGcRootFieldLoad(cls,
- out_loc,
- Address(out,
- CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_)),
- /* fixup_label */ nullptr,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -6196,11 +6204,11 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind(
break;
case HLoadString::LoadKind::kBootImageAddress:
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ break;
}
return desired_string_load_kind;
}
@@ -6251,11 +6259,13 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ movl(out, Immediate(/* placeholder */ 0));
codegen_->RecordBootStringPatch(load);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
Register method_address = locations->InAt(0).AsRegister<Register>();
__ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
codegen_->RecordBootStringPatch(load);
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index dd1628c867..b86d080aa6 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -412,7 +412,8 @@ class CodeGeneratorX86 : public CodeGenerator {
void RecordSimplePatch();
void RecordBootStringPatch(HLoadString* load_string);
- void RecordTypePatch(HLoadClass* load_class);
+ void RecordBootTypePatch(HLoadClass* load_class);
+ Label* NewTypeBssEntryPatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
Label* NewJitRootStringPatch(const DexFile& dex_file,
@@ -621,8 +622,10 @@ class CodeGeneratorX86 : public CodeGenerator {
ArenaDeque<Label> simple_patches_;
// String patch locations; type depends on configuration (app .bss or boot image PIC/non-PIC).
ArenaDeque<PatchInfo<Label>> string_patches_;
- // Type patch locations.
- ArenaDeque<PatchInfo<Label>> type_patches_;
+ // Type patch locations for boot image; type depends on configuration (boot image PIC/non-PIC).
+ ArenaDeque<PatchInfo<Label>> boot_image_type_patches_;
+ // Type patch locations for kBssEntry.
+ ArenaDeque<PatchInfo<Label>> type_bss_entry_patches_;
// Patches for string root accesses in JIT compiled code.
ArenaDeque<PatchInfo<Label>> jit_string_patches_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index d4bf18d176..1b19875eac 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -234,12 +234,12 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCode(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
@@ -249,7 +249,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)),
Immediate(cls_->GetTypeIndex().index_));
x86_64_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage : kQuickInitializeType,
- at_,
+ instruction_,
dex_pc_,
this);
if (do_clinit_) {
@@ -266,6 +266,15 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ __ movl(Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false),
+ locations->Out().AsRegister<CpuRegister>());
+ Label* fixup_label = x86_64_codegen->NewTypeBssEntryPatch(cls_);
+ __ Bind(fixup_label);
+ }
__ jmp(GetExitLabel());
}
@@ -275,10 +284,6 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -300,9 +305,9 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
// Custom calling convention: RAX serves as both input and output.
- __ movl(CpuRegister(RAX), Immediate(string_index));
+ __ movl(CpuRegister(RAX), Immediate(string_index.index_));
x86_64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
instruction_->GetDexPc(),
@@ -1079,9 +1084,15 @@ void CodeGeneratorX86_64::RecordBootStringPatch(HLoadString* load_string) {
__ Bind(&string_patches_.back().label);
}
-void CodeGeneratorX86_64::RecordTypePatch(HLoadClass* load_class) {
- type_patches_.emplace_back(load_class->GetDexFile(), load_class->GetTypeIndex().index_);
- __ Bind(&type_patches_.back().label);
+void CodeGeneratorX86_64::RecordBootTypePatch(HLoadClass* load_class) {
+ boot_image_type_patches_.emplace_back(load_class->GetDexFile(),
+ load_class->GetTypeIndex().index_);
+ __ Bind(&boot_image_type_patches_.back().label);
+}
+
+Label* CodeGeneratorX86_64::NewTypeBssEntryPatch(HLoadClass* load_class) {
+ type_bss_entry_patches_.emplace_back(load_class->GetDexFile(), load_class->GetTypeIndex().index_);
+ return &type_bss_entry_patches_.back().label;
}
Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) {
@@ -1118,7 +1129,8 @@ void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
pc_relative_dex_cache_patches_.size() +
simple_patches_.size() +
string_patches_.size() +
- type_patches_.size();
+ boot_image_type_patches_.size() +
+ type_bss_entry_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
@@ -1127,13 +1139,17 @@ void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(boot_image_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
} else {
- // These are always PC-relative, see GetSupportedLoadStringKind().
+ // These are always PC-relative, see GetSupportedLoadClassKind()/GetSupportedLoadStringKind().
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(boot_image_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(string_patches_, linker_patches);
}
- // These are always PC-relative, see GetSupportedLoadClassKind().
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(type_patches_, linker_patches);
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
+ DCHECK_EQ(size, linker_patches->size());
}
void CodeGeneratorX86_64::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -1214,7 +1230,8 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
@@ -2423,6 +2440,14 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderX86_64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86_64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
void LocationsBuilderX86_64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -4038,7 +4063,6 @@ void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(Location::RegisterLocation(RAX));
}
@@ -4055,7 +4079,7 @@ void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction)
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
DCHECK(!codegen_->IsLeafMethod());
}
}
@@ -5417,11 +5441,12 @@ HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kJitTableAddress:
- break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kJitTableAddress:
+ DCHECK(Runtime::Current()->UseJitCompilation());
+ break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -5429,15 +5454,16 @@ HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
}
void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(RAX),
- /* code_generator_supports_read_barrier */ true);
+ Location::RegisterLocation(RAX));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -5448,9 +5474,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
@@ -5467,14 +5491,14 @@ Label* CodeGeneratorX86_64::NewJitRootClassPatch(const DexFile& dex_file,
}
void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
@@ -5482,7 +5506,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -5497,9 +5521,10 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
- codegen_->RecordTypePatch(cls);
+ codegen_->RecordBootTypePatch(cls);
break;
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
@@ -5509,6 +5534,15 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
codegen_->RecordSimplePatch();
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
+ /* no_rip */ false);
+ Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
+ // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
/* no_rip */ true);
@@ -5518,33 +5552,6 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- uint32_t offset = cls->GetDexCacheElementOffset();
- Label* fixup_label = codegen_->NewPcRelativeDexCacheArrayPatch(cls->GetDexFile(), offset);
- Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ false);
- // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
- __ movq(out,
- Address(current_method,
- ArtMethod::DexCacheResolvedTypesOffset(kX86_64PointerSize).Int32Value()));
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- GenerateGcRootFieldLoad(
- cls,
- out_loc,
- Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_)),
- /* fixup_label */ nullptr,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
default:
LOG(FATAL) << "Unexpected load kind: " << cls->GetLoadKind();
UNREACHABLE();
@@ -5600,11 +5607,11 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ break;
}
return desired_string_load_kind;
}
@@ -5650,6 +5657,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
codegen_->RecordBootStringPatch(load);
return; // No dex cache slow path.
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 32d006c5f3..8b3ab4c438 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -409,7 +409,8 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void RecordSimplePatch();
void RecordBootStringPatch(HLoadString* load_string);
- void RecordTypePatch(HLoadClass* load_class);
+ void RecordBootTypePatch(HLoadClass* load_class);
+ Label* NewTypeBssEntryPatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
Label* NewJitRootStringPatch(const DexFile& dex_file,
@@ -604,8 +605,10 @@ class CodeGeneratorX86_64 : public CodeGenerator {
ArenaDeque<Label> simple_patches_;
// String patch locations; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PatchInfo<Label>> string_patches_;
- // Type patch locations.
- ArenaDeque<PatchInfo<Label>> type_patches_;
+ // Type patch locations for boot image (always PIC).
+ ArenaDeque<PatchInfo<Label>> boot_image_type_patches_;
+ // Type patch locations for kBssEntry.
+ ArenaDeque<PatchInfo<Label>> type_bss_entry_patches_;
// Fixups for jump tables need to be handled specially.
ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.cc b/compiler/optimizing/dex_cache_array_fixups_arm.cc
index ae9e0febfb..9ddcd563ca 100644
--- a/compiler/optimizing/dex_cache_array_fixups_arm.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.cc
@@ -59,21 +59,6 @@ class DexCacheArrayFixupsVisitor : public HGraphVisitor {
}
private:
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
- // If this is a load with PC-relative access to the dex cache types array,
- // we need to add the dex cache arrays base as the special input.
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCachePcRelative) {
- // Initialize base for target dex file if needed.
- const DexFile& dex_file = load_class->GetDexFile();
- HArmDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(dex_file);
- // Update the element offset in base.
- DexCacheArraysLayout layout(kArmPointerSize, &dex_file);
- base->UpdateElementOffset(layout.TypeOffset(load_class->GetTypeIndex()));
- // Add the special argument base to the load.
- load_class->AddSpecialInput(base);
- }
- }
-
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
// If this is an invoke with PC-relative access to the dex cache methods array,
// we need to add the dex cache arrays base as the special input.
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.cc b/compiler/optimizing/dex_cache_array_fixups_mips.cc
index 798193e6e6..04a4294c48 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.cc
@@ -53,21 +53,6 @@ class DexCacheArrayFixupsVisitor : public HGraphVisitor {
}
private:
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
- // If this is a load with PC-relative access to the dex cache types array,
- // we need to add the dex cache arrays base as the special input.
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCachePcRelative) {
- // Initialize base for target dex file if needed.
- const DexFile& dex_file = load_class->GetDexFile();
- HMipsDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(dex_file);
- // Update the element offset in base.
- DexCacheArraysLayout layout(kMipsPointerSize, &dex_file);
- base->UpdateElementOffset(layout.TypeOffset(load_class->GetTypeIndex()));
- // Add the special argument base to the load.
- load_class->AddSpecialInput(base);
- }
- }
-
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
// If this is an invoke with PC-relative access to the dex cache methods array,
// we need to add the dex cache arrays base as the special input.
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 09dcefa02c..f6fba883bd 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -464,6 +464,11 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
}
+ void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+ VisitInvoke(invoke);
+ StartAttributeStream("invoke_type") << "InvokePolymorphic";
+ }
+
void VisitInstanceFieldGet(HInstanceFieldGet* iget) OVERRIDE {
StartAttributeStream("field_name") <<
iget->GetFieldInfo().GetDexFile().PrettyField(iget->GetFieldInfo().GetFieldIndex(),
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index d5c4c2fa69..6d8ae75460 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -368,10 +368,14 @@ void InductionVarRange::Replace(HInstruction* instruction,
}
}
-bool InductionVarRange::IsFinite(HLoopInformation* loop) const {
+bool InductionVarRange::IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const {
HInductionVarAnalysis::InductionInfo *trip =
induction_analysis_->LookupInfo(loop, GetLoopControl(loop));
- return trip != nullptr && !IsUnsafeTripCount(trip);
+ if (trip != nullptr && !IsUnsafeTripCount(trip)) {
+ IsConstant(trip->op_a, kExact, tc);
+ return true;
+ }
+ return false;
}
//
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index ba14847d82..6c424b78b9 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -150,9 +150,9 @@ class InductionVarRange {
}
/**
- * Checks if header logic of a loop terminates.
+ * Checks if the header logic of a loop terminates. Sets the trip count `tc` if known.
*/
- bool IsFinite(HLoopInformation* loop) const;
+ bool IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const;
private:
/*
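The new out-parameter is what connects the range analysis to the loop simplification below: IsFinite() still answers whether the header logic terminates, but when the trip count is a compile-time constant it is also reported back, so a caller can special-case loops that iterate exactly once. A toy analog of the changed API, with invented types standing in for the induction analysis:

    #include <cstdint>
    #include <cstdio>

    struct Loop {
      bool provably_terminates;   // stands in for "trip count exists and is not unsafe"
      bool constant_trip_count;   // stands in for IsConstant(trip->op_a, kExact, ...)
      int64_t trip_count;
    };

    // Mirrors the new signature: returns whether the loop is finite and, when the
    // trip count is a known constant, also reports it through `tc`.
    static bool IsFinite(const Loop& loop, /*out*/ int64_t* tc) {
      if (!loop.provably_terminates) {
        return false;
      }
      if (loop.constant_trip_count) {
        *tc = loop.trip_count;    // only written when known, like IsConstant()
      }
      return true;
    }

    int main() {
      Loop single_trip{true, true, 1};
      int64_t tc = 0;
      if (IsFinite(single_trip, &tc) && tc == 1) {
        std::printf("loop body can be unrolled into the preheader\n");
      }
      return 0;
    }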
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index e5d05e9e6d..d7da46bbe7 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -308,8 +308,10 @@ ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
}
bool HInliner::TryInline(HInvoke* invoke_instruction) {
- if (invoke_instruction->IsInvokeUnresolved()) {
- return false; // Don't bother to move further if we know the method is unresolved.
+ if (invoke_instruction->IsInvokeUnresolved() ||
+ invoke_instruction->IsInvokePolymorphic()) {
+ return false; // Don't bother to go any further if we know the method is unresolved
+ // or the call is an invoke-polymorphic.
}
ScopedObjectAccess soa(Thread::Current());
@@ -1428,15 +1430,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
return false;
}
- if (current->IsNewInstance() &&
- (current->AsNewInstance()->GetEntrypoint() == kQuickAllocObjectWithAccessCheck)) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it is using an entrypoint"
- << " with access checks";
- // Allocation entrypoint does not handle inlined frames.
- return false;
- }
-
if (current->IsNewArray() &&
(current->AsNewArray()->GetEntrypoint() == kQuickAllocArrayWithAccessCheck)) {
VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 5a75a01867..24c7030771 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1,3 +1,4 @@
+
/*
* Copyright (C) 2016 The Android Open Source Project
*
@@ -904,6 +905,33 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
false /* is_unresolved */);
}
+bool HInstructionBuilder::BuildInvokePolymorphic(const Instruction& instruction ATTRIBUTE_UNUSED,
+ uint32_t dex_pc,
+ uint32_t method_idx,
+ uint32_t proto_idx,
+ uint32_t number_of_vreg_arguments,
+ bool is_range,
+ uint32_t* args,
+ uint32_t register_index) {
+ const char* descriptor = dex_file_->GetShorty(proto_idx);
+ DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), number_of_vreg_arguments);
+ Primitive::Type return_type = Primitive::GetType(descriptor[0]);
+ size_t number_of_arguments = strlen(descriptor);
+ HInvoke* invoke = new (arena_) HInvokePolymorphic(arena_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx);
+ return HandleInvoke(invoke,
+ number_of_vreg_arguments,
+ args,
+ register_index,
+ is_range,
+ descriptor,
+ nullptr /* clinit_check */,
+ false /* is_unresolved */);
+}
+
bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
@@ -915,11 +943,11 @@ bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t d
bool finalizable;
bool needs_access_check = NeedsAccessCheck(type_index, dex_cache, &finalizable);
- // Only the non-resolved entrypoint handles the finalizable class case. If we
+ // Only the access check entrypoint handles the finalizable class case. If we
// need access checks, then we haven't resolved the method and the class may
// again be finalizable.
QuickEntrypointEnum entrypoint = (finalizable || needs_access_check)
- ? kQuickAllocObject
+ ? kQuickAllocObjectWithChecks
: kQuickAllocObjectInitialized;
if (outer_dex_cache.Get() != dex_cache.Get()) {
@@ -944,7 +972,6 @@ bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t d
AppendInstruction(new (arena_) HNewInstance(
cls,
- graph_->GetCurrentMethod(),
dex_pc,
type_index,
*dex_compilation_unit_->GetDexFile(),
@@ -1914,6 +1941,37 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
break;
}
+ case Instruction::INVOKE_POLYMORPHIC: {
+ uint16_t method_idx = instruction.VRegB_45cc();
+ uint16_t proto_idx = instruction.VRegH_45cc();
+ uint32_t number_of_vreg_arguments = instruction.VRegA_45cc();
+ uint32_t args[5];
+ instruction.GetVarArgs(args);
+ return BuildInvokePolymorphic(instruction,
+ dex_pc,
+ method_idx,
+ proto_idx,
+ number_of_vreg_arguments,
+ false,
+ args,
+ -1);
+ }
+
+ case Instruction::INVOKE_POLYMORPHIC_RANGE: {
+ uint16_t method_idx = instruction.VRegB_4rcc();
+ uint16_t proto_idx = instruction.VRegH_4rcc();
+ uint32_t number_of_vreg_arguments = instruction.VRegA_4rcc();
+ uint32_t register_index = instruction.VRegC_4rcc();
+ return BuildInvokePolymorphic(instruction,
+ dex_pc,
+ method_idx,
+ proto_idx,
+ number_of_vreg_arguments,
+ true,
+ nullptr,
+ register_index);
+ }
+
case Instruction::NEG_INT: {
Unop_12x<HNeg>(instruction, Primitive::kPrimInt, dex_pc);
break;
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index f29e522040..aef0b94c1f 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -175,6 +175,17 @@ class HInstructionBuilder : public ValueObject {
uint32_t* args,
uint32_t register_index);
+ // Builds an invocation node for invoke-polymorphic and returns whether the
+ // instruction is supported.
+ bool BuildInvokePolymorphic(const Instruction& instruction,
+ uint32_t dex_pc,
+ uint32_t method_idx,
+ uint32_t proto_idx,
+ uint32_t number_of_vreg_arguments,
+ bool is_range,
+ uint32_t* args,
+ uint32_t register_index);
+
// Builds a new array node and the instructions that fill it.
void BuildFilledNewArray(uint32_t dex_pc,
dex::TypeIndex type_index,
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index cda3185a45..f1ae549928 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -752,8 +752,9 @@ static void MathAbsFP(LocationSummary* locations,
FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
FRegister out = locations->Out().AsFpuRegister<FRegister>();
- // As a "quality of implementation", rather than pure "spec compliance", it is required that
- // Math.abs() clears the sign bit (but changes nothing else) for all numbers, including NaN.
+ // Note: as a "quality of implementation", rather than pure "spec compliance", we require that
+ // Math.abs() clears the sign bit (but changes nothing else) for all numbers, including NaN
+ // (a signaling NaN may become quiet, though).
//
// The ABS.fmt instructions (abs.s and abs.d) do exactly that when NAN2008=1 (R6). For this case,
// both regular floating point numbers and NAN values are treated alike, only the sign bit is
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 2856c3ea11..4f30b11753 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -943,6 +943,10 @@ class LSEVisitor : public HGraphVisitor {
HandleInvoke(invoke);
}
+ void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
HandleInvoke(clinit);
}
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 9d73e29602..95838380cc 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -161,26 +161,27 @@ void HLoopOptimization::RemoveLoop(LoopNode* node) {
void HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
for ( ; node != nullptr; node = node->next) {
+ // Visit inner loops first.
int current_induction_simplification_count = induction_simplication_count_;
if (node->inner != nullptr) {
TraverseLoopsInnerToOuter(node->inner);
}
- // Visit loop after its inner loops have been visited. If the induction of any inner
- // loop has been simplified, recompute the induction information of this loop first.
+ // Recompute induction information of this loop if the induction
+ // of any inner loop has been simplified.
if (current_induction_simplification_count != induction_simplication_count_) {
induction_range_.ReVisit(node->loop_info);
}
- // Repeat simplifications until no more changes occur. Note that since
- // each simplification consists of eliminating code (without introducing
- // new code), this process is always finite.
+ // Repeat simplifications in the body of this loop until no more changes occur.
+ // Note that since each simplification consists of eliminating code (without
+ // introducing new code), this process is always finite.
do {
simplified_ = false;
- SimplifyBlocks(node);
SimplifyInduction(node);
+ SimplifyBlocks(node);
} while (simplified_);
- // Remove inner loops when empty.
+ // Simplify inner loop.
if (node->inner == nullptr) {
- RemoveIfEmptyInnerLoop(node);
+ SimplifyInnerLoop(node);
}
}
}
@@ -198,7 +199,7 @@ void HLoopOptimization::SimplifyInduction(LoopNode* node) {
iset_->clear();
int32_t use_count = 0;
if (IsPhiInduction(phi) &&
- IsOnlyUsedAfterLoop(node->loop_info, phi, &use_count) &&
+ IsOnlyUsedAfterLoop(node->loop_info, phi, /*collect_loop_uses*/ false, &use_count) &&
// No uses, or no early-exit with proper replacement.
(use_count == 0 ||
(!IsEarlyExit(node->loop_info) && TryReplaceWithLastValue(phi, preheader)))) {
@@ -206,7 +207,6 @@ void HLoopOptimization::SimplifyInduction(LoopNode* node) {
RemoveFromCycle(i);
}
simplified_ = true;
- induction_simplication_count_++;
}
}
}
@@ -216,24 +216,14 @@ void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
// Remove dead instructions from the loop-body.
- for (HBackwardInstructionIterator i(block->GetInstructions()); !i.Done(); i.Advance()) {
- HInstruction* instruction = i.Current();
- if (instruction->IsDeadAndRemovable()) {
- simplified_ = true;
- block->RemoveInstruction(instruction);
- }
- }
+ RemoveDeadInstructions(block->GetPhis());
+ RemoveDeadInstructions(block->GetInstructions());
// Remove trivial control flow blocks from the loop-body.
- HBasicBlock* succ = nullptr;
- if (IsGotoBlock(block, &succ) && succ->GetPredecessors().size() == 1) {
- // Trivial goto block can be removed.
- HBasicBlock* pred = block->GetSinglePredecessor();
+ if (block->GetPredecessors().size() == 1 &&
+ block->GetSuccessors().size() == 1 &&
+ block->GetSingleSuccessor()->GetPredecessors().size() == 1) {
simplified_ = true;
- pred->ReplaceSuccessor(block, succ);
- block->RemoveDominatedBlock(succ);
- block->DisconnectAndDelete();
- pred->AddDominatedBlock(succ);
- succ->SetDominator(pred);
+ block->MergeWith(block->GetSingleSuccessor());
} else if (block->GetSuccessors().size() == 2) {
// Trivial if block can be bypassed to either branch.
HBasicBlock* succ0 = block->GetSuccessors()[0];
@@ -258,55 +248,66 @@ void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
}
}
-void HLoopOptimization::RemoveIfEmptyInnerLoop(LoopNode* node) {
+bool HLoopOptimization::SimplifyInnerLoop(LoopNode* node) {
HBasicBlock* header = node->loop_info->GetHeader();
HBasicBlock* preheader = node->loop_info->GetPreHeader();
// Ensure loop header logic is finite.
- if (!induction_range_.IsFinite(node->loop_info)) {
- return;
+ int64_t tc = 0;
+ if (!induction_range_.IsFinite(node->loop_info, &tc)) {
+ return false;
}
// Ensure there is only a single loop-body (besides the header).
HBasicBlock* body = nullptr;
for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
if (it.Current() != header) {
if (body != nullptr) {
- return;
+ return false;
}
body = it.Current();
}
}
// Ensure there is only a single exit point.
if (header->GetSuccessors().size() != 2) {
- return;
+ return false;
}
HBasicBlock* exit = (header->GetSuccessors()[0] == body)
? header->GetSuccessors()[1]
: header->GetSuccessors()[0];
// Ensure exit can only be reached by exiting loop.
if (exit->GetPredecessors().size() != 1) {
- return;
+ return false;
}
- // Detect an empty loop: no side effects other than plain iteration. Replace
- // subsequent index uses, if any, with the last value and remove the loop.
+ // Detect either an empty loop (no side effects other than plain iteration) or
+ // a trivial loop (just iterating once). Replace subsequent index uses, if any,
+ // with the last value and remove the loop, possibly after unrolling its body.
+ HInstruction* phi = header->GetFirstPhi();
iset_->clear();
int32_t use_count = 0;
- if (IsEmptyHeader(header) &&
- IsEmptyBody(body) &&
- IsOnlyUsedAfterLoop(node->loop_info, header->GetFirstPhi(), &use_count) &&
- // No uses, or proper replacement.
- (use_count == 0 || TryReplaceWithLastValue(header->GetFirstPhi(), preheader))) {
- body->DisconnectAndDelete();
- exit->RemovePredecessor(header);
- header->RemoveSuccessor(exit);
- header->RemoveDominatedBlock(exit);
- header->DisconnectAndDelete();
- preheader->AddSuccessor(exit);
- preheader->AddInstruction(new (graph_->GetArena()) HGoto()); // global allocator
- preheader->AddDominatedBlock(exit);
- exit->SetDominator(preheader);
- // Update hierarchy.
- RemoveLoop(node);
+ if (IsEmptyHeader(header)) {
+ bool is_empty = IsEmptyBody(body);
+ if ((is_empty || tc == 1) &&
+ IsOnlyUsedAfterLoop(node->loop_info, phi, /*collect_loop_uses*/ true, &use_count) &&
+ // No uses, or proper replacement.
+ (use_count == 0 || TryReplaceWithLastValue(phi, preheader))) {
+ if (!is_empty) {
+ // Unroll the loop body, which sees the initial value of the index.
+ phi->ReplaceWith(phi->InputAt(0));
+ preheader->MergeInstructionsWith(body);
+ }
+ body->DisconnectAndDelete();
+ exit->RemovePredecessor(header);
+ header->RemoveSuccessor(exit);
+ header->RemoveDominatedBlock(exit);
+ header->DisconnectAndDelete();
+ preheader->AddSuccessor(exit);
+ preheader->AddInstruction(new (graph_->GetArena()) HGoto()); // global allocator
+ preheader->AddDominatedBlock(exit);
+ exit->SetDominator(preheader);
+ RemoveLoop(node); // update hierarchy
+ return true;
+ }
}
+ return false;
}
bool HLoopOptimization::IsPhiInduction(HPhi* phi) {
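Compared to the old RemoveIfEmptyInnerLoop(), the tc == 1 branch above also accepts a non-empty body: since the body runs exactly once, the induction phi can be replaced by its initial value and the body merged into the preheader before the loop blocks are deleted. In source-level terms the effect is roughly this (an illustration of the idea, not the IR transformation itself):

    #include <cstdio>

    static int f(int i) { return i * 2 + 1; }

    int main() {
      // Before: a provably single-trip loop (trip count tc == 1).
      int sum = 0;
      for (int i = 0; i < 1; ++i) {   // header is "empty": it only controls iteration
        sum += f(i);                  // body has side effects, so the loop is not "empty"
      }

      // After: the induction phi is replaced by its initial value (0) and the body
      // is merged into the preheader; the loop blocks disappear entirely.
      int sum_unrolled = 0;
      sum_unrolled += f(0);

      std::printf("%d %d\n", sum, sum_unrolled);  // both print 1
      return 0;
    }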
@@ -374,12 +375,19 @@ bool HLoopOptimization::IsEmptyBody(HBasicBlock* block) {
bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
+ bool collect_loop_uses,
/*out*/ int32_t* use_count) {
for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
HInstruction* user = use.GetUser();
if (iset_->find(user) == iset_->end()) { // not excluded?
HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
if (other_loop_info != nullptr && other_loop_info->IsIn(*loop_info)) {
+ // If collect_loop_uses is set, simply keep adding those uses to the set.
+ // Otherwise, reject uses inside the loop that were not already in the set.
+ if (collect_loop_uses) {
+ iset_->insert(user);
+ continue;
+ }
return false;
}
++*use_count;
@@ -388,40 +396,48 @@ bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
return true;
}
-void HLoopOptimization::ReplaceAllUses(HInstruction* instruction, HInstruction* replacement) {
- const HUseList<HInstruction*>& uses = instruction->GetUses();
- for (auto it = uses.begin(), end = uses.end(); it != end;) {
- HInstruction* user = it->GetUser();
- size_t index = it->GetIndex();
- ++it; // increment before replacing
- if (iset_->find(user) == iset_->end()) { // not excluded?
- user->ReplaceInput(replacement, index);
- induction_range_.Replace(user, instruction, replacement); // update induction
- }
- }
- const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
- for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
- HEnvironment* user = it->GetUser();
- size_t index = it->GetIndex();
- ++it; // increment before replacing
- if (iset_->find(user->GetHolder()) == iset_->end()) { // not excluded?
- user->RemoveAsUserOfInput(index);
- user->SetRawEnvAt(index, replacement);
- replacement->AddEnvUseAt(user, index);
- }
- }
-}
-
bool HLoopOptimization::TryReplaceWithLastValue(HInstruction* instruction, HBasicBlock* block) {
// Try to replace outside uses with the last value. Environment uses can consume this
// value too, since any first true use is outside the loop (although this may imply
// that de-opting may look "ahead" a bit on the phi value). If there are only environment
// uses, the value is dropped altogether, since the computations have no effect.
if (induction_range_.CanGenerateLastValue(instruction)) {
- ReplaceAllUses(instruction, induction_range_.GenerateLastValue(instruction, graph_, block));
+ HInstruction* replacement = induction_range_.GenerateLastValue(instruction, graph_, block);
+ const HUseList<HInstruction*>& uses = instruction->GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end;) {
+ HInstruction* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment before replacing
+ if (iset_->find(user) == iset_->end()) { // not excluded?
+ user->ReplaceInput(replacement, index);
+ induction_range_.Replace(user, instruction, replacement); // update induction
+ }
+ }
+ const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
+ for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
+ HEnvironment* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment before replacing
+ if (iset_->find(user->GetHolder()) == iset_->end()) { // not excluded?
+ user->RemoveAsUserOfInput(index);
+ user->SetRawEnvAt(index, replacement);
+ replacement->AddEnvUseAt(user, index);
+ }
+ }
+ induction_simplication_count_++;
return true;
}
return false;
}
+void HLoopOptimization::RemoveDeadInstructions(const HInstructionList& list) {
+ for (HBackwardInstructionIterator i(list); !i.Done(); i.Advance()) {
+ HInstruction* instruction = i.Current();
+ if (instruction->IsDeadAndRemovable()) {
+ simplified_ = true;
+ instruction->GetBlock()->RemoveInstructionOrPhi(instruction);
+ }
+ }
+}
+
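RemoveDeadInstructions() walks each list backwards, so a removed dead user releases its inputs before the sweep reaches them, and a whole dead chain can disappear in a single pass. A toy model of that sweep (invented data structures, loosely mirroring the backward iteration above):

    #include <cstdio>
    #include <vector>

    // Toy SSA-ish instruction: `input` is the index of the instruction it uses (-1 if none),
    // `uses` counts how many other instructions use it, `has_side_effects` keeps it alive.
    struct Instr { int input; int uses; bool has_side_effects; bool removed = false; };

    int main() {
      // i0 <- constant (used by i1), i1 <- add(i0) (unused), i2 <- call (side effects).
      std::vector<Instr> body = {{-1, 1, false}, {0, 0, false}, {-1, 0, true}};
      // Sweep backwards so that removing a dead user first makes its inputs dead too,
      // letting one pass clean up the whole chain.
      for (int i = static_cast<int>(body.size()) - 1; i >= 0; --i) {
        Instr& ins = body[static_cast<size_t>(i)];
        if (!ins.has_side_effects && ins.uses == 0) {
          ins.removed = true;
          if (ins.input >= 0) --body[static_cast<size_t>(ins.input)].uses;  // release the use on its input
        }
      }
      for (size_t i = 0; i < body.size(); ++i) {
        std::printf("i%zu %s\n", i, body[i].removed ? "removed" : "kept");
      }
      // Prints: i0 removed, i1 removed, i2 kept.
      return 0;
    }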
} // namespace art
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 0f05b24c37..9ddab4150c 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -60,19 +60,21 @@ class HLoopOptimization : public HOptimization {
void TraverseLoopsInnerToOuter(LoopNode* node);
+ // Simplification.
void SimplifyInduction(LoopNode* node);
void SimplifyBlocks(LoopNode* node);
- void RemoveIfEmptyInnerLoop(LoopNode* node);
+ bool SimplifyInnerLoop(LoopNode* node);
+ // Helpers.
bool IsPhiInduction(HPhi* phi);
bool IsEmptyHeader(HBasicBlock* block);
bool IsEmptyBody(HBasicBlock* block);
-
bool IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
+ bool collect_loop_uses,
/*out*/ int32_t* use_count);
- void ReplaceAllUses(HInstruction* instruction, HInstruction* replacement);
bool TryReplaceWithLastValue(HInstruction* instruction, HBasicBlock* block);
+ void RemoveDeadInstructions(const HInstructionList& list);
// Range information based on prior induction variable analysis.
InductionVarRange induction_range_;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index f7c2f9735c..81662fabc5 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1853,6 +1853,14 @@ void HBasicBlock::DisconnectAndDelete() {
SetGraph(nullptr);
}
+void HBasicBlock::MergeInstructionsWith(HBasicBlock* other) {
+ DCHECK(EndsWithControlFlowInstruction());
+ RemoveInstruction(GetLastInstruction());
+ instructions_.Add(other->GetInstructions());
+ other->instructions_.SetBlockOfInstructions(this);
+ other->instructions_.Clear();
+}
+
void HBasicBlock::MergeWith(HBasicBlock* other) {
DCHECK_EQ(GetGraph(), other->GetGraph());
DCHECK(ContainsElement(dominated_blocks_, other));
@@ -1861,11 +1869,7 @@ void HBasicBlock::MergeWith(HBasicBlock* other) {
DCHECK(other->GetPhis().IsEmpty());
// Move instructions from `other` to `this`.
- DCHECK(EndsWithControlFlowInstruction());
- RemoveInstruction(GetLastInstruction());
- instructions_.Add(other->GetInstructions());
- other->instructions_.SetBlockOfInstructions(this);
- other->instructions_.Clear();
+ MergeInstructionsWith(other);
// Remove `other` from the loops it is included in.
for (HLoopInformationOutwardIterator it(*other); !it.Done(); it.Advance()) {
@@ -2463,7 +2467,7 @@ bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
case LoadKind::kJitTableAddress:
return AsMirrorInternal(GetAddress()) == AsMirrorInternal(other_load_class->GetAddress());
default:
- DCHECK(HasTypeReference(GetLoadKind()) || HasDexCacheReference(GetLoadKind()));
+ DCHECK(HasTypeReference(GetLoadKind()));
return IsSameDexFile(GetDexFile(), other_load_class->GetDexFile());
}
}
@@ -2494,10 +2498,10 @@ std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs) {
return os << "BootImageLinkTimePcRelative";
case HLoadClass::LoadKind::kBootImageAddress:
return os << "BootImageAddress";
+ case HLoadClass::LoadKind::kBssEntry:
+ return os << "BssEntry";
case HLoadClass::LoadKind::kJitTableAddress:
return os << "JitTableAddress";
- case HLoadClass::LoadKind::kDexCachePcRelative:
- return os << "DexCachePcRelative";
case HLoadClass::LoadKind::kDexCacheViaMethod:
return os << "DexCacheViaMethod";
default:
@@ -2559,10 +2563,10 @@ std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs) {
return os << "BootImageAddress";
case HLoadString::LoadKind::kBssEntry:
return os << "BssEntry";
- case HLoadString::LoadKind::kDexCacheViaMethod:
- return os << "DexCacheViaMethod";
case HLoadString::LoadKind::kJitTableAddress:
return os << "JitTableAddress";
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ return os << "DexCacheViaMethod";
default:
LOG(FATAL) << "Unknown HLoadString::LoadKind: " << static_cast<int>(rhs);
UNREACHABLE();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index e32c661176..35190d2d8c 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1097,6 +1097,9 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
// with a control flow instruction).
void ReplaceWith(HBasicBlock* other);
+ // Merges the instructions of `other` at the end of `this`.
+ void MergeInstructionsWith(HBasicBlock* other);
+
// Merge `other` at the end of `this`. This method updates loops, reverse post
// order, links to predecessors, successors, dominators and deletes the block
// from the graph. The two blocks must be successive, i.e. `this` the only
@@ -1291,6 +1294,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(InvokeInterface, Invoke) \
M(InvokeStaticOrDirect, Invoke) \
M(InvokeVirtual, Invoke) \
+ M(InvokePolymorphic, Invoke) \
M(LessThan, Condition) \
M(LessThanOrEqual, Condition) \
M(LoadClass, Instruction) \
@@ -3758,10 +3762,9 @@ class HCompare FINAL : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HCompare);
};
-class HNewInstance FINAL : public HExpression<2> {
+class HNewInstance FINAL : public HExpression<1> {
public:
HNewInstance(HInstruction* cls,
- HCurrentMethod* current_method,
uint32_t dex_pc,
dex::TypeIndex type_index,
const DexFile& dex_file,
@@ -3775,7 +3778,6 @@ class HNewInstance FINAL : public HExpression<2> {
SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
SetPackedFlag<kFlagFinalizable>(finalizable);
SetRawInputAt(0, cls);
- SetRawInputAt(1, current_method);
}
dex::TypeIndex GetTypeIndex() const { return type_index_; }
@@ -3968,6 +3970,28 @@ class HInvokeUnresolved FINAL : public HInvoke {
DISALLOW_COPY_AND_ASSIGN(HInvokeUnresolved);
};
+class HInvokePolymorphic FINAL : public HInvoke {
+ public:
+ HInvokePolymorphic(ArenaAllocator* arena,
+ uint32_t number_of_arguments,
+ Primitive::Type return_type,
+ uint32_t dex_pc,
+ uint32_t dex_method_index)
+ : HInvoke(arena,
+ number_of_arguments,
+ 0u /* number_of_other_inputs */,
+ return_type,
+ dex_pc,
+ dex_method_index,
+ nullptr,
+ kVirtual) {}
+
+ DECLARE_INSTRUCTION(InvokePolymorphic);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HInvokePolymorphic);
+};
+
class HInvokeStaticOrDirect FINAL : public HInvoke {
public:
// Requirements of this method call regarding the class
@@ -5502,14 +5526,13 @@ class HLoadClass FINAL : public HInstruction {
// GetIncludePatchInformation().
kBootImageAddress,
+ // Load from an entry in the .bss section using a PC-relative load.
+ // Used for classes outside boot image when .bss is accessible with a PC-relative load.
+ kBssEntry,
+
// Load from the root table associated with the JIT compiled method.
kJitTableAddress,
- // Load from resolved types array in the dex cache using a PC-relative load.
- // Used for classes outside boot image when we know that we can access
- // the dex cache arrays using a PC-relative load.
- kDexCachePcRelative,
-
// Load from resolved types array accessed through the class loaded from
// the compiled method's own ArtMethod*. This is the default access type when
// all other types are unavailable.
@@ -5528,6 +5551,7 @@ class HLoadClass FINAL : public HInstruction {
special_input_(HUserRecord<HInstruction*>(current_method)),
type_index_(type_index),
dex_file_(dex_file),
+ address_(0u),
loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) {
// Referrers class should not need access check. We never inline unverified
// methods so we can't possibly end up in this situation.
@@ -5536,14 +5560,14 @@ class HLoadClass FINAL : public HInstruction {
SetPackedField<LoadKindField>(
is_referrers_class ? LoadKind::kReferrersClass : LoadKind::kDexCacheViaMethod);
SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
- SetPackedFlag<kFlagIsInDexCache>(false);
SetPackedFlag<kFlagIsInBootImage>(false);
SetPackedFlag<kFlagGenerateClInitCheck>(false);
}
void SetLoadKindWithAddress(LoadKind load_kind, uint64_t address) {
DCHECK(HasAddress(load_kind));
- load_data_.address = address;
+ DCHECK_NE(address, 0u);
+ address_ = address;
SetLoadKindInternal(load_kind);
}
@@ -5556,15 +5580,6 @@ class HLoadClass FINAL : public HInstruction {
SetLoadKindInternal(load_kind);
}
- void SetLoadKindWithDexCacheReference(LoadKind load_kind,
- const DexFile& dex_file,
- uint32_t element_index) {
- DCHECK(HasDexCacheReference(load_kind));
- DCHECK(IsSameDexFile(dex_file_, dex_file));
- load_data_.dex_cache_element_index = element_index;
- SetLoadKindInternal(load_kind);
- }
-
LoadKind GetLoadKind() const {
return GetPackedField<LoadKindField>();
}
@@ -5589,13 +5604,21 @@ class HLoadClass FINAL : public HInstruction {
}
bool CanCallRuntime() const {
- return MustGenerateClinitCheck() ||
- (!IsReferrersClass() && !IsInDexCache()) ||
- NeedsAccessCheck();
+ return NeedsAccessCheck() ||
+ MustGenerateClinitCheck() ||
+ GetLoadKind() == LoadKind::kDexCacheViaMethod ||
+ GetLoadKind() == LoadKind::kBssEntry;
}
bool CanThrow() const OVERRIDE {
- return CanCallRuntime();
+ return NeedsAccessCheck() ||
+ MustGenerateClinitCheck() ||
+ // If the class is in the boot image, the lookup in the runtime call cannot throw.
+ // This keeps CanThrow() consistent between non-PIC (using kBootImageAddress) and
+ // PIC and subsequently avoids a DCE behavior dependency on the PIC option.
+ ((GetLoadKind() == LoadKind::kDexCacheViaMethod ||
+ GetLoadKind() == LoadKind::kBssEntry) &&
+ !IsInBootImage());
}
ReferenceTypeInfo GetLoadedClassRTI() {
@@ -5611,15 +5634,13 @@ class HLoadClass FINAL : public HInstruction {
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
- uint32_t GetDexCacheElementOffset() const;
-
uint64_t GetAddress() const {
DCHECK(HasAddress(GetLoadKind()));
- return load_data_.address;
+ return address_;
}
bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
- return !IsReferrersClass();
+ return GetLoadKind() == LoadKind::kDexCacheViaMethod;
}
static SideEffects SideEffectsForArchRuntimeCalls() {
@@ -5628,17 +5649,9 @@ class HLoadClass FINAL : public HInstruction {
bool IsReferrersClass() const { return GetLoadKind() == LoadKind::kReferrersClass; }
bool NeedsAccessCheck() const { return GetPackedFlag<kFlagNeedsAccessCheck>(); }
- bool IsInDexCache() const { return GetPackedFlag<kFlagIsInDexCache>(); }
bool IsInBootImage() const { return GetPackedFlag<kFlagIsInBootImage>(); }
bool MustGenerateClinitCheck() const { return GetPackedFlag<kFlagGenerateClInitCheck>(); }
- void MarkInDexCache() {
- SetPackedFlag<kFlagIsInDexCache>(true);
- DCHECK(!NeedsEnvironment());
- RemoveEnvironment();
- SetSideEffects(SideEffects::None());
- }
-
void MarkInBootImage() {
SetPackedFlag<kFlagIsInBootImage>(true);
}
@@ -5659,8 +5672,7 @@ class HLoadClass FINAL : public HInstruction {
private:
static constexpr size_t kFlagNeedsAccessCheck = kNumberOfGenericPackedBits;
- static constexpr size_t kFlagIsInDexCache = kFlagNeedsAccessCheck + 1;
- static constexpr size_t kFlagIsInBootImage = kFlagIsInDexCache + 1;
+ static constexpr size_t kFlagIsInBootImage = kFlagNeedsAccessCheck + 1;
// Whether this instruction must generate the initialization check.
// Used for code generation.
static constexpr size_t kFlagGenerateClInitCheck = kFlagIsInBootImage + 1;
@@ -5672,10 +5684,11 @@ class HLoadClass FINAL : public HInstruction {
using LoadKindField = BitField<LoadKind, kFieldLoadKind, kFieldLoadKindSize>;
static bool HasTypeReference(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageLinkTimeAddress ||
+ return load_kind == LoadKind::kReferrersClass ||
+ load_kind == LoadKind::kBootImageLinkTimeAddress ||
load_kind == LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == LoadKind::kDexCacheViaMethod ||
- load_kind == LoadKind::kReferrersClass;
+ load_kind == LoadKind::kBssEntry ||
+ load_kind == LoadKind::kDexCacheViaMethod;
}
static bool HasAddress(LoadKind load_kind) {
@@ -5683,24 +5696,17 @@ class HLoadClass FINAL : public HInstruction {
load_kind == LoadKind::kJitTableAddress;
}
- static bool HasDexCacheReference(LoadKind load_kind) {
- return load_kind == LoadKind::kDexCachePcRelative;
- }
-
void SetLoadKindInternal(LoadKind load_kind);
// The special input is the HCurrentMethod for kDexCacheViaMethod or kReferrersClass.
// For other load kinds it's empty or possibly some architecture-specific instruction
- // for PC-relative loads, i.e. kDexCachePcRelative or kBootImageLinkTimePcRelative.
+ // for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
const dex::TypeIndex type_index_;
const DexFile& dex_file_;
- union {
- uint32_t dex_cache_element_index; // Only for dex cache reference.
- uint64_t address; // Up to 64-bit, needed for kJitTableAddress on 64-bit targets.
- } load_data_;
+ uint64_t address_; // Up to 64-bit, needed for kJitTableAddress on 64-bit targets.
ReferenceTypeInfo loaded_class_rti_;
@@ -5709,19 +5715,13 @@ class HLoadClass FINAL : public HInstruction {
std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs);
// Note: defined outside class to see operator<<(., HLoadClass::LoadKind).
-inline uint32_t HLoadClass::GetDexCacheElementOffset() const {
- DCHECK(HasDexCacheReference(GetLoadKind())) << GetLoadKind();
- return load_data_.dex_cache_element_index;
-}
-
-// Note: defined outside class to see operator<<(., HLoadClass::LoadKind).
inline void HLoadClass::AddSpecialInput(HInstruction* special_input) {
// The special input is used for PC-relative loads on some architectures,
// including literal pool loads, which are PC-relative too.
DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
- GetLoadKind() == LoadKind::kDexCachePcRelative ||
GetLoadKind() == LoadKind::kBootImageLinkTimeAddress ||
- GetLoadKind() == LoadKind::kBootImageAddress) << GetLoadKind();
+ GetLoadKind() == LoadKind::kBootImageAddress ||
+ GetLoadKind() == LoadKind::kBssEntry) << GetLoadKind();
DCHECK(special_input_.GetInstruction() == nullptr);
special_input_ = HUserRecord<HInstruction*>(special_input);
special_input->AddUseAt(this, 0);
@@ -5749,15 +5749,15 @@ class HLoadString FINAL : public HInstruction {
// Used for strings outside boot image when .bss is accessible with a PC-relative load.
kBssEntry,
+ // Load from the root table associated with the JIT compiled method.
+ kJitTableAddress,
+
// Load from resolved strings array accessed through the class loaded from
// the compiled method's own ArtMethod*. This is the default access type when
// all other types are unavailable.
kDexCacheViaMethod,
- // Load from the root table associated with the JIT compiled method.
- kJitTableAddress,
-
- kLast = kJitTableAddress,
+ kLast = kDexCacheViaMethod,
};
HLoadString(HCurrentMethod* current_method,
@@ -5849,7 +5849,7 @@ class HLoadString FINAL : public HInstruction {
// The special input is the HCurrentMethod for kDexCacheViaMethod.
// For other load kinds it's empty or possibly some architecture-specific instruction
- // for PC-relative loads, i.e. kDexCachePcRelative or kBootImageLinkTimePcRelative.
+ // for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
dex::StringIndex string_index_;
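
The kBssEntry kind introduced above replaces kDexCachePcRelative: instead of indexing the dex-cache resolved-types array, compiled code performs a PC-relative load from a per-type slot that the oat file places in .bss, and calls into the runtime only when the slot is still empty. A rough sketch of that behavior, with invented names (type_bss_slot, ResolveTypeSlowPath) standing in for the slot and the runtime entrypoint; the possible runtime call is also why such a load can still throw:

#include <cstdint>

// Invented stand-ins for illustration; the real code is emitted as
// PC-relative assembly and calls an ART runtime entrypoint.
struct Class {};

Class* ResolveTypeSlowPath(uint32_t type_index) {
  static Class resolved;  // placeholder; the real call resolves and may throw
  return &resolved;
}

// One pointer-sized slot per referenced type; it lives in .bss, starts out
// null, and is reachable with a PC-relative load.
static Class* type_bss_slot = nullptr;

Class* LoadClassBssEntry(uint32_t type_index) {
  Class* klass = type_bss_slot;               // fast path
  if (klass == nullptr) {
    klass = ResolveTypeSlowPath(type_index);  // slow path fills the slot
    type_bss_slot = klass;
  }
  return klass;
}
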
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index e321b9e3aa..a0fdde169d 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -62,8 +62,9 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
switch (load_kind) {
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadClass::LoadKind::kBootImageAddress:
+ case HLoadClass::LoadKind::kBssEntry:
// Add a base register for PC-relative literals on R2.
InitializePCRelativeBasePointer();
load_class->AddSpecialInput(base_);
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index b1fdb1792d..2befc8ca4e 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -83,7 +83,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == HLoadClass::LoadKind::kDexCachePcRelative) {
+ load_kind == HLoadClass::LoadKind::kBssEntry) {
InitializePCRelativeBasePointer();
load_class->AddSpecialInput(base_);
}
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index d6a6efe604..efbaf6c221 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -133,39 +133,6 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
}
}
-void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) {
- HLoadClass* load_class = instruction->InputAt(0)->AsLoadClass();
- const bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse();
- // Change the entrypoint to kQuickAllocObject if either:
- // - the class is finalizable (only kQuickAllocObject handles finalizable classes),
- // - the class needs access checks (we do not know if it's finalizable),
- // - or the load class has only one use.
- if (instruction->IsFinalizable() || has_only_one_use || load_class->NeedsAccessCheck()) {
- instruction->SetEntrypoint(kQuickAllocObject);
- instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex().index_), 0);
- if (has_only_one_use) {
- // We've just removed the only use of the HLoadClass. Since we don't run DCE after this pass,
- // do it manually if possible.
- if (!load_class->CanThrow()) {
- // If the load class can not throw, it has no side effects and can be removed if there is
- // only one use.
- load_class->GetBlock()->RemoveInstruction(load_class);
- } else if (!instruction->GetEnvironment()->IsFromInlinedInvoke() &&
- CanMoveClinitCheck(load_class, instruction)) {
- // The allocation entry point that deals with access checks does not work with inlined
- // methods, so we need to check whether this allocation comes from an inlined method.
- // We also need to make the same check as for moving clinit check, whether the HLoadClass
- // has the clinit check responsibility or not (HLoadClass can throw anyway).
- // If it needed access checks, we delegate the access check to the allocation.
- if (load_class->NeedsAccessCheck()) {
- instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck);
- }
- load_class->GetBlock()->RemoveInstruction(load_class);
- }
- }
- }
-}
-
bool PrepareForRegisterAllocation::CanEmitConditionAt(HCondition* condition,
HInstruction* user) const {
if (condition->GetNext() != user) {
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index a6791482a7..c128227654 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -44,7 +44,6 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
void VisitCondition(HCondition* condition) OVERRIDE;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
- void VisitNewInstance(HNewInstance* instruction) OVERRIDE;
bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 213a5acba6..e647de8c80 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -295,13 +295,13 @@ static void BoundTypeForClassCheck(HInstruction* check) {
}
if (check->IsIf()) {
- HBasicBlock* trueBlock = check->IsEqual()
+ HBasicBlock* trueBlock = compare->IsEqual()
? check->AsIf()->IfTrueSuccessor()
: check->AsIf()->IfFalseSuccessor();
BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti);
} else {
DCHECK(check->IsDeoptimize());
- if (check->IsEqual()) {
+ if (compare->IsEqual()) {
BoundTypeIn(receiver, check->GetBlock(), check, class_rti);
}
}
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index dc8ee23ba4..eabda2675e 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -154,13 +154,24 @@ void HSharpening::SharpenClass(HLoadClass* load_class,
DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
<< load_class->GetLoadKind();
- DCHECK(!load_class->IsInDexCache()) << "HLoadClass should not be optimized before sharpening.";
DCHECK(!load_class->IsInBootImage()) << "HLoadClass should not be optimized before sharpening.";
+ if (load_class->NeedsAccessCheck()) {
+ // We need to call the runtime anyway, so we simply get the class as that call's return value.
+ return;
+ }
+
+ if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
+ // Loading from the ArtMethod* is the most efficient retrieval in code size.
+ // TODO: This may not actually be true for all architectures and
+ // locations of target classes. The additional register pressure
+ // for using the ArtMethod* should be considered.
+ return;
+ }
+
const DexFile& dex_file = load_class->GetDexFile();
dex::TypeIndex type_index = load_class->GetTypeIndex();
- bool is_in_dex_cache = false;
bool is_in_boot_image = false;
HLoadClass::LoadKind desired_load_kind = static_cast<HLoadClass::LoadKind>(-1);
uint64_t address = 0u; // Class or dex cache element address.
@@ -169,31 +180,29 @@ void HSharpening::SharpenClass(HLoadClass* load_class,
// Compiling boot image. Check if the class is a boot image class.
DCHECK(!runtime->UseJitCompilation());
if (!compiler_driver->GetSupportBootImageFixup()) {
- // MIPS64 or compiler_driver_test. Do not sharpen.
+ // compiler_driver_test. Do not sharpen.
desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
} else if ((klass != nullptr) && compiler_driver->IsImageClass(
dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
is_in_boot_image = true;
- is_in_dex_cache = true;
desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
: HLoadClass::LoadKind::kBootImageLinkTimeAddress;
} else {
- // Not a boot image class. We must go through the dex cache.
+ // Not a boot image class.
DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
- desired_load_kind = HLoadClass::LoadKind::kDexCachePcRelative;
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
} else {
is_in_boot_image = (klass != nullptr) && runtime->GetHeap()->ObjectIsInBootImageSpace(klass);
if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- is_in_dex_cache = (klass != nullptr);
if (is_in_boot_image) {
// TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
address = reinterpret_cast64<uint64_t>(klass);
- } else if (is_in_dex_cache) {
+ } else if (klass != nullptr) {
desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
// We store in the address field the location of the stack reference maintained
// by the handle. We do this now so that the code generation does not need to figure
@@ -212,12 +221,7 @@ void HSharpening::SharpenClass(HLoadClass* load_class,
address = reinterpret_cast64<uint64_t>(klass);
} else {
// Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
- // Use PC-relative load from the dex cache if the dex file belongs
- // to the oat file that we're currently compiling.
- desired_load_kind =
- ContainsElement(compiler_driver->GetDexFilesForOatFile(), &load_class->GetDexFile())
- ? HLoadClass::LoadKind::kDexCachePcRelative
- : HLoadClass::LoadKind::kDexCacheViaMethod;
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
}
DCHECK_NE(desired_load_kind, static_cast<HLoadClass::LoadKind>(-1));
@@ -226,27 +230,11 @@ void HSharpening::SharpenClass(HLoadClass* load_class,
load_class->MarkInBootImage();
}
- if (load_class->NeedsAccessCheck()) {
- // We need to call the runtime anyway, so we simply get the class as that call's return value.
- return;
- }
-
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
- // Loading from the ArtMethod* is the most efficient retrieval in code size.
- // TODO: This may not actually be true for all architectures and
- // locations of target classes. The additional register pressure
- // for using the ArtMethod* should be considered.
- return;
- }
-
- if (is_in_dex_cache) {
- load_class->MarkInDexCache();
- }
-
HLoadClass::LoadKind load_kind = codegen->GetSupportedLoadClassKind(desired_load_kind);
switch (load_kind) {
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
case HLoadClass::LoadKind::kDexCacheViaMethod:
load_class->SetLoadKindWithTypeReference(load_kind, dex_file, type_index);
break;
@@ -255,13 +243,6 @@ void HSharpening::SharpenClass(HLoadClass* load_class,
DCHECK_NE(address, 0u);
load_class->SetLoadKindWithAddress(load_kind, address);
break;
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- PointerSize pointer_size = InstructionSetPointerSize(codegen->GetInstructionSet());
- DexCacheArraysLayout layout(pointer_size, &dex_file);
- size_t element_index = layout.TypeOffset(type_index);
- load_class->SetLoadKindWithDexCacheReference(load_kind, dex_file, element_index);
- break;
- }
default:
LOG(FATAL) << "Unexpected load kind: " << load_kind;
UNREACHABLE();
@@ -274,7 +255,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
const DexFile& dex_file = load_string->GetDexFile();
dex::StringIndex string_index = load_string->GetStringIndex();
- HLoadString::LoadKind desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
+ HLoadString::LoadKind desired_load_kind = static_cast<HLoadString::LoadKind>(-1);
{
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
@@ -297,8 +278,8 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
? HLoadString::LoadKind::kBootImageLinkTimePcRelative
: HLoadString::LoadKind::kBootImageLinkTimeAddress;
} else {
- // MIPS64 or compiler_driver_test. Do not sharpen.
- DCHECK_EQ(desired_load_kind, HLoadString::LoadKind::kDexCacheViaMethod);
+ // compiler_driver_test. Do not sharpen.
+ desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
}
} else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
@@ -310,6 +291,8 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
} else {
desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
}
+ } else {
+ desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
}
} else {
// AOT app compilation. Try to lookup the string without allocating if not found.
@@ -326,6 +309,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
load_string->SetString(handles_->NewHandle(string));
}
}
+ DCHECK_NE(desired_load_kind, static_cast<HLoadString::LoadKind>(-1));
HLoadString::LoadKind load_kind = codegen_->GetSupportedLoadStringKind(desired_load_kind);
load_string->SetLoadKind(load_kind);
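
With the dex-cache PC-relative kind gone, the class-load sharpening above reduces to a small decision tree. A condensed sketch of that selection, assuming access checks and kReferrersClass already returned early; the function name and the unresolved-JIT fallback are simplifications, and the real code additionally records addresses and filters the result through GetSupportedLoadClassKind():

enum class LoadKind {
  kBootImageLinkTimeAddress,
  kBootImageLinkTimePcRelative,
  kBootImageAddress,
  kBssEntry,
  kJitTableAddress,
  kDexCacheViaMethod,
};

LoadKind SelectClassLoadKind(bool compiling_boot_image,
                             bool is_in_boot_image,
                             bool use_jit,
                             bool compile_pic,
                             bool klass_resolved) {
  if (compiling_boot_image) {
    if (is_in_boot_image) {
      return compile_pic ? LoadKind::kBootImageLinkTimePcRelative
                         : LoadKind::kBootImageLinkTimeAddress;
    }
    return LoadKind::kBssEntry;  // class from the oat file's own dex files
  }
  if (use_jit) {
    if (is_in_boot_image) {
      return LoadKind::kBootImageAddress;  // direct pointer to the class
    }
    return klass_resolved ? LoadKind::kJitTableAddress
                          : LoadKind::kDexCacheViaMethod;  // assumed fallback
  }
  // AOT app compilation.
  if (is_in_boot_image && !compile_pic) {
    return LoadKind::kBootImageAddress;
  }
  return LoadKind::kBssEntry;
}
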
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index a71ab4b53c..102c313a6a 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1124,28 +1124,23 @@ END art_quick_resolve_string
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_rosalloc
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_resolved_rosalloc
// Fast path rosalloc allocation.
- // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
- // r2, r3, r12: free.
- ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
- // Load the class (r2)
- ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- cbz r2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
-
+ // r0: type/return value, r9: Thread::Current
+ // r1, r2, r3, r12: free.
ldr r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
// TODO: consider using ldrd.
ldr r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp r3, r12
- bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
- ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3)
+ ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3)
cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation. Also does the
// initialized and finalizable checks.
- bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size. Since the size is
// already aligned we can combine the
@@ -1159,7 +1154,7 @@ ENTRY art_quick_alloc_object_rosalloc
// Load the free list head (r3). This
// will be the return val.
ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
- cbz r3, .Lart_quick_alloc_object_rosalloc_slow_path
+ cbz r3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
ldr r1, [r3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
// and update the list head with the
@@ -1172,8 +1167,8 @@ ENTRY art_quick_alloc_object_rosalloc
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
- POISON_HEAP_REF r2
- str r2, [r3, #MIRROR_OBJECT_CLASS_OFFSET]
+ POISON_HEAP_REF r0
+ str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET]
// Fence. This is "ish" not "ishst" so
// that it also ensures ordering of
// the class status load with respect
@@ -1204,20 +1199,20 @@ ENTRY art_quick_alloc_object_rosalloc
mov r0, r3 // Set the return value and return.
bx lr
-.Lart_quick_alloc_object_rosalloc_slow_path:
+.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r2, r9 @ pass Thread::Current
- bl artAllocObjectFromCodeRosAlloc @ (uint32_t type_idx, Method* method, Thread*)
+ mov r1, r9 @ pass Thread::Current
+ bl artAllocObjectFromCodeResolvedRosAlloc @ (mirror::Class* cls, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_rosalloc
+END art_quick_alloc_object_resolved_rosalloc
-// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
+// The common fast path code for art_quick_alloc_object_resolved_tlab
+// and art_quick_alloc_object_resolved_region_tlab.
//
-// r0: type_idx/return value, r1: ArtMethod*, r2: class, r9: Thread::Current, r3, r12: free.
-// Need to preserve r0 and r1 to the slow path.
-.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
- cbz r2, \slowPathLabel // Check null class
+// r0: type, r9: Thread::Current, r1, r2, r3, r12: free.
+// Need to preserve r0 to the slow path.
+.macro ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH slowPathLabel
// Load thread_local_pos (r12) and
// thread_local_end (r3) with ldrd.
// Check constraints for ldrd.
@@ -1226,20 +1221,20 @@ END art_quick_alloc_object_rosalloc
#endif
ldrd r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET]
sub r12, r3, r12 // Compute the remaining buf size.
- ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3).
+ ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3).
cmp r3, r12 // Check if it fits.
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
// Reload old thread_local_pos (r0)
// for the return value.
- ldr r0, [r9, #THREAD_LOCAL_POS_OFFSET]
- add r1, r0, r3
+ ldr r2, [r9, #THREAD_LOCAL_POS_OFFSET]
+ add r1, r2, r3
str r1, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
ldr r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add r1, r1, #1
str r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
- POISON_HEAP_REF r2
- str r2, [r0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ POISON_HEAP_REF r0
+ str r0, [r2, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
// Fence. This is "ish" not "ishst" so
// that the code after this allocation
// site will see the right values in
@@ -1247,71 +1242,46 @@ END art_quick_alloc_object_rosalloc
// Alternatively we could use "ishst"
// if we use load-acquire for the
// object size load.)
+ mov r0, r2
dmb ish
bx lr
.endm
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
-ENTRY art_quick_alloc_object_tlab
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_tlab, TLAB).
+ENTRY art_quick_alloc_object_resolved_tlab
// Fast path tlab allocation.
- // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
- // r2, r3, r12: free.
+ // r0: type, r9: Thread::Current
+ // r1, r2, r3, r12: free.
#if defined(USE_READ_BARRIER)
mvn r0, #0 // Read barrier not supported here.
bx lr // Return -1.
#endif
- ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
- // Load the class (r2)
- ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
-.Lart_quick_alloc_object_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
+.Lart_quick_alloc_object_resolved_tlab_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
- mov r2, r9 // Pass Thread::Current.
- bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
+ mov r1, r9 // Pass Thread::Current.
+ bl artAllocObjectFromCodeResolvedTLAB // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_tlab
+END art_quick_alloc_object_resolved_tlab
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-ENTRY art_quick_alloc_object_region_tlab
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+ENTRY art_quick_alloc_object_resolved_region_tlab
// Fast path tlab allocation.
- // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current, r2, r3, r12: free.
+ // r0: type, r9: Thread::Current, r1, r2, r3, r12: free.
#if !defined(USE_READ_BARRIER)
eor r0, r0, r0 // Read barrier must be enabled here.
sub r0, r0, #1 // Return -1.
bx lr
#endif
- ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
- // Load the class (r2)
- ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- // Read barrier for class load.
- ldr r3, [r9, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz r3, .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_marking
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_marking:
- cbz r2, .Lart_quick_alloc_object_region_tlab_slow_path // Null check for loading lock word.
- // Check lock word for mark bit, if marked do the allocation.
- ldr r3, [r2, MIRROR_OBJECT_LOCK_WORD_OFFSET]
- ands r3, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
- bne .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark
- // the class.
- push {r0, r1, r3, lr} // Save registers. r3 is pushed only
- // to align sp by 16 bytes.
- mov r0, r2 // Pass the class as the first param.
- bl artReadBarrierMark
- mov r2, r0 // Get the (marked) class back.
- pop {r0, r1, r3, lr}
- b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
+.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
- mov r2, r9 // Pass Thread::Current.
- bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*)
+ mov r1, r9 // Pass Thread::Current.
+ bl artAllocObjectFromCodeResolvedRegionTLAB // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_region_tlab
+END art_quick_alloc_object_resolved_region_tlab
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
@@ -2040,3 +2010,83 @@ READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, r8
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, r9
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, r10
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, r11
+
+.extern artInvokePolymorphic
+ENTRY art_quick_invoke_polymorphic
+ SETUP_SAVE_REFS_AND_ARGS_FRAME r2
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ mov r0, #0 @ initialize 64-bit JValue as zero.
+ str r0, [sp, #-4]!
+ .cfi_adjust_cfa_offset 4
+ str r0, [sp, #-4]!
+ .cfi_adjust_cfa_offset 4
+ mov r0, sp @ pass JValue for return result as first argument.
+ bl artInvokePolymorphic @ artInvokePolymorphic(JValue, receiver, Thread*, SP)
+ sub r0, 'A' @ return value is descriptor of handle's return type.
+ cmp r0, 'Z' - 'A' @ check if value is in bounds of handler table
+ bgt .Lcleanup_and_return @ and clean-up if not.
+ adr r1, .Lhandler_table
+ tbb [r0, r1] @ branch to handler for return value based on return type.
+
+.Lstart_of_handlers:
+.Lstore_boolean_result:
+ ldrb r0, [sp] @ Copy boolean value to return value of this function.
+ b .Lcleanup_and_return
+.Lstore_char_result:
+ ldrh r0, [sp] @ Copy char value to return value of this function.
+ b .Lcleanup_and_return
+.Lstore_float_result:
+ vldr s0, [sp] @ Copy float value from JValue result to the context restored by
+ vstr s0, [sp, #16] @ RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ b .Lcleanup_and_return
+.Lstore_double_result:
+ vldr d0, [sp] @ Copy double value from JValue result to the context restored by
+ vstr d0, [sp, #16] @ RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ b .Lcleanup_and_return
+.Lstore_long_result:
+ ldr r1, [sp, #4] @ Copy the upper bits from JValue result to the context restored by
+ str r1, [sp, #80] @ RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ // Fall-through for lower bits.
+.Lstore_int_result:
+ ldr r0, [sp] @ Copy int value to return value of this function.
+ // Fall-through to clean up and return.
+.Lcleanup_and_return:
+ add sp, #8
+ .cfi_adjust_cfa_offset -8
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ RETURN_OR_DELIVER_PENDING_EXCEPTION_REG r2
+
+.macro HANDLER_TABLE_OFFSET handler_label
+ .byte (\handler_label - .Lstart_of_handlers) / 2
+.endm
+
+.Lhandler_table:
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // A
+ HANDLER_TABLE_OFFSET(.Lstore_int_result) // B (byte)
+ HANDLER_TABLE_OFFSET(.Lstore_char_result) // C (char)
+ HANDLER_TABLE_OFFSET(.Lstore_double_result) // D (double)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // E
+ HANDLER_TABLE_OFFSET(.Lstore_float_result) // F (float)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // G
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // H
+ HANDLER_TABLE_OFFSET(.Lstore_int_result) // I (int)
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // J (long)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // K
+ HANDLER_TABLE_OFFSET(.Lstore_int_result) // L (object)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // M
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // N
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // O
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // P
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Q
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // R
+ HANDLER_TABLE_OFFSET(.Lstore_int_result) // S (short)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // T
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // U
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // V (void)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // W
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // X
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Y
+ HANDLER_TABLE_OFFSET(.Lstore_boolean_result) // Z (boolean)
+.purgem HANDLER_TABLE_OFFSET
+END art_quick_invoke_polymorphic
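
All of the new art_quick_invoke_polymorphic stubs follow the same shape: artInvokePolymorphic writes the call result into a 64-bit JValue reserved on the stack and returns the shorty character of the return type, and the stub copies the right bytes into the return register(s) via an 'A'-relative handler table. A hedged C++ sketch of the integral half of that dispatch (the union layout and letter mapping mirror the table above; floating-point results would instead be moved to the FP return register):

#include <cstdint>

// Simplified JValue: the runtime writes the call result here.
union JValue {
  uint8_t z;   // boolean
  uint16_t c;  // char
  int32_t i;   // byte/short/int/reference (references are 32-bit compressed)
  int64_t j;   // long
  float f;
  double d;
};

int64_t ExtractIntegralResult(char shorty, const JValue& result) {
  if (shorty < 'A' || shorty > 'Z') {
    return 0;  // outside the handler table: treat like void
  }
  switch (shorty) {
    case 'Z': return result.z;                    // zero-extend byte
    case 'C': return result.c;                    // zero-extend half-word
    case 'B': case 'S': case 'I': case 'L': return result.i;
    case 'J': return result.j;                    // both 32-bit halves
    default:  return 0;                           // 'V' and unused letters
  }
}
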
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index b88515f21f..3b3783cad4 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1669,7 +1669,6 @@ END art_quick_resolve_string
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
// Comment out allocators that have arm64 specific asm.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) implemented in asm
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
@@ -1682,27 +1681,23 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_rosalloc
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_resolved_rosalloc
// Fast path rosalloc allocation.
- // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
- // x2-x7: free.
- ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
- // Load the class (x2)
- ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- cbz x2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
+ // x0: type, xSELF(x19): Thread::Current
+ // x1-x7: free.
ldr x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
// ldp won't work due to large offset.
ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp x3, x4
- bhs .Lart_quick_alloc_object_rosalloc_slow_path
- ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3)
+ bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ ldr w3, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3)
cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation. Also does the
// finalizable and initialization
// checks.
- bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size. Since the size is
// already aligned we can combine the
@@ -1715,7 +1710,7 @@ ENTRY art_quick_alloc_object_rosalloc
// Load the free list head (x3). This
// will be the return val.
ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
- cbz x3, .Lart_quick_alloc_object_rosalloc_slow_path
+ cbz x3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
ldr x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
// and update the list head with the
@@ -1728,8 +1723,8 @@ ENTRY art_quick_alloc_object_rosalloc
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
- POISON_HEAP_REF w2
- str w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
+ POISON_HEAP_REF w0
+ str w0, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
// Fence. This is "ish" not "ishst" so
// that it also ensures ordering of
// the object size load with respect
@@ -1759,13 +1754,13 @@ ENTRY art_quick_alloc_object_rosalloc
mov x0, x3 // Set the return value and return.
ret
-.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
- mov x2, xSELF // pass Thread::Current
- bl artAllocObjectFromCodeRosAlloc // (uint32_t type_idx, Method* method, Thread*)
+.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
+ mov x1, xSELF // pass Thread::Current
+ bl artAllocObjectFromCodeResolvedRosAlloc // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_rosalloc
+END art_quick_alloc_object_resolved_rosalloc
// The common fast path code for art_quick_alloc_array_region_tlab.
@@ -1834,16 +1829,6 @@ END art_quick_alloc_object_rosalloc
ret
.endm
-// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
-//
-// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current
-// x3-x7: free.
-// Need to preserve x0 and x1 to the slow path.
-.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
- cbz x2, \slowPathLabel // Check null class
- ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED \slowPathLabel
-.endm
-
// TODO: delete ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since it is the same as
// ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED.
.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
@@ -1853,20 +1838,18 @@ END art_quick_alloc_object_rosalloc
.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel
ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
- ldr w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7).
+ ldr w7, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7).
add x6, x4, x7 // Add object size to tlab pos.
cmp x6, x5 // Check if it fits, overflow works
// since the tlab pos and end are 32
// bit values.
bhi \slowPathLabel
- // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
- mov x0, x4
str x6, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add x5, x5, #1
str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
- POISON_HEAP_REF w2
- str w2, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ POISON_HEAP_REF w0
+ str w0, [x4, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
// Fence. This is "ish" not "ishst" so
// that the code after this allocation
// site will see the right values in
@@ -1874,91 +1857,52 @@ END art_quick_alloc_object_rosalloc
// Alternatively we could use "ishst"
// if we use load-acquire for the
// object size load.)
+ mov x0, x4
dmb ish
ret
.endm
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
-ENTRY art_quick_alloc_object_tlab
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB).
+ENTRY art_quick_alloc_object_resolved_tlab
// Fast path tlab allocation.
- // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
- // x2-x7: free.
+ // x0: type, xSELF(x19): Thread::Current
+ // x1-x7: free.
#if defined(USE_READ_BARRIER)
mvn x0, xzr // Read barrier not supported here.
ret // Return -1.
#endif
- ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
- // Load the class (x2)
- ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
-.Lart_quick_alloc_object_tlab_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
- mov x2, xSELF // Pass Thread::Current.
- bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
+ ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_object_resolved_tlab_slow_path
+.Lart_quick_alloc_object_resolved_tlab_slow_path:
+ SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
+ mov x1, xSELF // Pass Thread::Current.
+ bl artAllocObjectFromCodeResolvedTLAB // (mirror::Class*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_tlab
+END art_quick_alloc_object_resolved_tlab
// The common code for art_quick_alloc_object_*region_tlab
-.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved, read_barrier
+.macro GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB name, entrypoint, fast_path
ENTRY \name
// Fast path region tlab allocation.
- // x0: type_idx/resolved class/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
- // If is_resolved is 1 then x0 is the resolved type, otherwise it is the index.
- // x2-x7: free.
+ // x0: type, xSELF(x19): Thread::Current
+ // x1-x7: free.
#if !defined(USE_READ_BARRIER)
mvn x0, xzr // Read barrier must be enabled here.
ret // Return -1.
#endif
-.if \is_resolved
- mov x2, x0 // class is actually stored in x0 already
-.else
- ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
- // Load the class (x2)
- ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- // If the class is null, go slow path. The check is required to read the lock word.
- cbz w2, .Lslow_path\name
-.endif
-.if \read_barrier
- // Most common case: GC is not marking.
- ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz x3, .Lmarking\name
-.endif
.Ldo_allocation\name:
\fast_path .Lslow_path\name
-.Lmarking\name:
-.if \read_barrier
- // GC is marking, check the lock word of the class for the mark bit.
- // Class is not null, check mark bit in lock word.
- ldr w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
- // If the bit is not zero, do the allocation.
- tbnz w3, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name
- // The read barrier slow path. Mark
- // the class.
- SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32 // Save registers (x0, x1, lr).
- SAVE_REG xLR, 24 // Align sp by 16 bytes.
- mov x0, x2 // Pass the class as the first param.
- bl artReadBarrierMark
- mov x2, x0 // Get the (marked) class back.
- RESTORE_REG xLR, 24
- RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32 // Restore registers.
- b .Ldo_allocation\name
-.endif
.Lslow_path\name:
SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
- mov x2, xSELF // Pass Thread::Current.
- bl \entrypoint // (uint32_t type_idx, Method* method, Thread*)
+ mov x1, xSELF // Pass Thread::Current.
+ bl \entrypoint // (mirror::Class*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END \name
.endm
-// Use ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since the null check is already done in GENERATE_ALLOC_OBJECT_TLAB.
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 0, 1
-// No read barrier for the resolved or initialized cases since the caller is responsible for the
-// read barrier due to the to-space invariant.
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1, 0
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1, 0
+GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED
+GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED
// TODO: We could use this macro for the normal tlab allocator too.
@@ -2623,3 +2567,82 @@ READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg26, w26, x26
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg27, w27, x27
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg28, w28, x28
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, w29, x29
+
+.extern artInvokePolymorphic
+ENTRY art_quick_invoke_polymorphic
+ SETUP_SAVE_REFS_AND_ARGS_FRAME // Save callee saves in case allocation triggers GC.
+ mov x2, xSELF
+ mov x3, sp
+ INCREASE_FRAME 16 // Reserve space for JValue result.
+ str xzr, [sp, #0] // Initialize result to zero.
+ mov x0, sp // Set r0 to point to result.
+ bl artInvokePolymorphic // artInvokePolymorphic(result, receiver, thread, save_area)
+ uxtb w0, w0 // Result is the return type descriptor as a char.
+ sub w0, w0, 'A' // Convert to zero based index.
+ cmp w0, 'Z' - 'A'
+ bhi .Lcleanup_and_return // Clean-up if out-of-bounds.
+ adrp x1, .Lhandler_table // Compute address of handler table.
+ add x1, x1, :lo12:.Lhandler_table
+ ldrb w0, [x1, w0, uxtw] // Lookup handler offset in handler table.
+ adr x1, .Lstart_of_handlers
+ add x0, x1, w0, sxtb #2 // Convert relative offset to absolute address.
+ br x0 // Branch to handler.
+
+.Lstart_of_handlers:
+.Lstore_boolean_result:
+ ldrb w0, [sp]
+ b .Lcleanup_and_return
+.Lstore_char_result:
+ ldrh w0, [sp]
+ b .Lcleanup_and_return
+.Lstore_float_result:
+ ldr s0, [sp]
+ str s0, [sp, #32]
+ b .Lcleanup_and_return
+.Lstore_double_result:
+ ldr d0, [sp]
+ str d0, [sp, #32]
+ b .Lcleanup_and_return
+.Lstore_long_result:
+ ldr x0, [sp]
+ // Fall-through
+.Lcleanup_and_return:
+ DECREASE_FRAME 16
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+
+ .section .rodata // Place handler table in read-only section away from text.
+ .align 2
+.macro HANDLER_TABLE_OFFSET handler_label
+ .byte (\handler_label - .Lstart_of_handlers) / 4
+.endm
+.Lhandler_table:
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // A
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // B (byte)
+ HANDLER_TABLE_OFFSET(.Lstore_char_result) // C (char)
+ HANDLER_TABLE_OFFSET(.Lstore_double_result) // D (double)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // E
+ HANDLER_TABLE_OFFSET(.Lstore_float_result) // F (float)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // G
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // H
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // I (int)
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // J (long)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // K
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // L (object - references are compressed and only 32-bits)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // M
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // N
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // O
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // P
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Q
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // R
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // S (short)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // T
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // U
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // V (void)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // W
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // X
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Y
+ HANDLER_TABLE_OFFSET(.Lstore_boolean_result) // Z (boolean)
+ .text
+
+END art_quick_invoke_polymorphic
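
The resolved TLAB fast paths above (ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH on arm, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED/_INITIALIZED on arm64) are plain bump-pointer allocation now that the class arrives already resolved in the first argument register. A sketch in C++ with invented Thread/Class field names; the real fields live on art::Thread and mirror::Class, and a too-large (or not fast-path-allocable) size simply falls through to the runtime entrypoint:

#include <atomic>
#include <cstddef>
#include <cstdint>

// Invented minimal stand-ins for the thread-local allocation buffer state.
struct Class { uint32_t object_size_alloc_fast_path; };
struct Thread {
  uint8_t* tlab_pos;
  uint8_t* tlab_end;
  size_t tlab_objects;
};

// Returns nullptr to signal "take the slow path", i.e. call
// artAllocObjectFromCodeResolved{TLAB,RegionTLAB}.
void* AllocObjectResolvedTlabFastPath(Thread* self, Class* klass) {
  size_t size = klass->object_size_alloc_fast_path;
  size_t remaining = static_cast<size_t>(self->tlab_end - self->tlab_pos);
  if (size > remaining) {
    return nullptr;  // does not fit (or class is not fast-path allocable)
  }
  uint8_t* obj = self->tlab_pos;
  self->tlab_pos = obj + size;
  ++self->tlab_objects;
  // Publish the class pointer in the object header, then fence so code after
  // the allocation sees consistent header values (the "dmb ish" above).
  *reinterpret_cast<Class**>(obj) = klass;
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return obj;
}
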
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 3e8cdc9374..3acc0a9d5b 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1831,116 +1831,10 @@ END \name
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_rosalloc
-
- # Fast path rosalloc allocation
- # a0: type_idx
- # a1: ArtMethod*
- # s1: Thread::Current
- # -----------------------------
- # t0: class
- # t1: object size
- # t2: rosalloc run
- # t3: thread stack top offset
- # t4: thread stack bottom offset
- # v0: free list head
- #
- # t5, t6 : temps
-
- lw $t0, ART_METHOD_DEX_CACHE_TYPES_OFFSET_32($a1) # Load dex cache resolved types
- # array.
-
- sll $t5, $a0, COMPRESSED_REFERENCE_SIZE_SHIFT # Shift the value.
- addu $t5, $t0, $t5 # Compute the index.
- lw $t0, 0($t5) # Load class (t0).
- beqz $t0, .Lart_quick_alloc_object_rosalloc_slow_path
-
- li $t6, MIRROR_CLASS_STATUS_INITIALIZED
- lw $t5, MIRROR_CLASS_STATUS_OFFSET($t0) # Check class status.
- bne $t5, $t6, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Add a fake dependence from the following access flag and size loads to the status load. This
- # is to prevent those loads from being reordered above the status load and reading wrong values.
- xor $t5, $t5, $t5
- addu $t0, $t0, $t5
-
- lw $t5, MIRROR_CLASS_ACCESS_FLAGS_OFFSET($t0) # Check if access flags has
- li $t6, ACCESS_FLAGS_CLASS_IS_FINALIZABLE # kAccClassIsFinalizable.
- and $t6, $t5, $t6
- bnez $t6, .Lart_quick_alloc_object_rosalloc_slow_path
-
- lw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation
- lw $t4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # stack has any room left.
- bgeu $t3, $t4, .Lart_quick_alloc_object_rosalloc_slow_path
-
- lw $t1, MIRROR_CLASS_OBJECT_SIZE_OFFSET($t0) # Load object size (t1).
- li $t5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
- # allocation.
- bgtu $t1, $t5, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Compute the rosalloc bracket index from the size. Allign up the size by the rosalloc bracket
- # quantum size and divide by the quantum size and subtract by 1.
-
- addiu $t1, $t1, -1 # Decrease obj size and shift right
- srl $t1, $t1, ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT # by quantum.
-
- sll $t2, $t1, POINTER_SIZE_SHIFT
- addu $t2, $t2, $s1
- lw $t2, THREAD_ROSALLOC_RUNS_OFFSET($t2) # Load rosalloc run (t2).
-
- # Load the free list head (v0).
- # NOTE: this will be the return val.
-
- lw $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
- beqz $v0, .Lart_quick_alloc_object_rosalloc_slow_path
- nop
-
- # Load the next pointer of the head and update the list head with the next pointer.
-
- lw $t5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
- sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
-
- # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
- # asserted to match.
-
-#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
-#error "Class pointer needs to overwrite next pointer."
-#endif
-
- POISON_HEAP_REF $t0
- sw $t0, MIRROR_OBJECT_CLASS_OFFSET($v0)
-
- # Push the new object onto the thread local allocation stack and increment the thread local
- # allocation stack top.
-
- sw $v0, 0($t3)
- addiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
- sw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
-
- # Decrement the size of the free list.
-
- lw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
- addiu $t5, $t5, -1
- sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
-
- sync # Fence.
-
- jalr $zero, $ra
- nop
-
- .Lart_quick_alloc_object_rosalloc_slow_path:
-
- SETUP_SAVE_REFS_ONLY_FRAME
- la $t9, artAllocObjectFromCodeRosAlloc
- jalr $t9
- move $a2, $s1 # Pass self as argument.
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-
-END art_quick_alloc_object_rosalloc
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
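
Note: the deleted MIPS fast path above keeps its access-flag and object-size loads ordered after the class-status load by building an artificial address dependence (the xor/addu pair). As a rough, hypothetical C++ analogue (not ART code; all names here are made up), the same ordering requirement could be expressed with an acquire load instead of a fake dependence:

    #include <atomic>
    #include <cstdint>

    struct ClassWord {                       // hypothetical stand-in for the class fields read here
      std::atomic<uint32_t> status;
      uint32_t access_flags;
      uint32_t object_size;
    };

    bool PassesFastPathChecks(const ClassWord& k,
                              uint32_t initialized_status,
                              uint32_t finalizable_flag) {
      // The acquire load keeps the access-flag and size reads from being hoisted
      // above the status read, which is what the artificial dependence achieves.
      if (k.status.load(std::memory_order_acquire) != initialized_status) {
        return false;
      }
      if ((k.access_flags & finalizable_flag) != 0) {
        return false;
      }
      return true;  // caller may now trust k.object_size
    }
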
/*
* Entry from managed code to resolve a string, this stub will allocate a String and deliver an
@@ -2336,7 +2230,7 @@ ENTRY_NO_GP art_quick_indexof
li $v0, -1 # return -1;
sll $v0, $a2, 1 # $a0 += $a2 * 2
- addu $a0, $a0, $v0 # " " " " "
+ addu $a0, $a0, $v0 # " ditto "
move $v0, $a2 # Set i to fromIndex.
1:
@@ -2386,3 +2280,65 @@ ENTRY_NO_GP art_quick_string_compareto
j $ra
nop
END art_quick_string_compareto
+
+.extern artInvokePolymorphic
+ENTRY art_quick_invoke_polymorphic
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
+ move $a2, rSELF # Make $a2 an alias for the current Thread.
+ move $a3, $sp # Make $a3 a pointer to the saved frame context.
+ addiu $sp, $sp, -24 # Reserve space for JValue result and 4 words for callee.
+ .cfi_adjust_cfa_offset 24
+ sw $zero, 20($sp) # Initialize JValue result.
+ sw $zero, 16($sp)
+ addiu $a0, $sp, 16 # Make $a0 a pointer to the JValue result
+ la $t9, artInvokePolymorphic
+ jalr $t9 # (result, receiver, Thread*, context)
+ nop
+.macro MATCH_RETURN_TYPE c, handler
+ li $t0, \c
+ beq $v0, $t0, \handler
+.endm
+ MATCH_RETURN_TYPE 'V', .Lcleanup_and_return
+ MATCH_RETURN_TYPE 'L', .Lstore_int_result
+ MATCH_RETURN_TYPE 'I', .Lstore_int_result
+ MATCH_RETURN_TYPE 'J', .Lstore_long_result
+ MATCH_RETURN_TYPE 'B', .Lstore_int_result
+ MATCH_RETURN_TYPE 'C', .Lstore_char_result
+ MATCH_RETURN_TYPE 'D', .Lstore_double_result
+ MATCH_RETURN_TYPE 'F', .Lstore_float_result
+ MATCH_RETURN_TYPE 'S', .Lstore_int_result
+ MATCH_RETURN_TYPE 'Z', .Lstore_boolean_result
+.purgem MATCH_RETURN_TYPE
+ nop
+ b .Lcleanup_and_return
+ nop
+.Lstore_boolean_result:
+ lbu $v0, 16($sp) # Move byte from JValue result to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_char_result:
+ lhu $v0, 16($sp) # Move char from JValue result to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_double_result:
+.Lstore_float_result:
+ LDu $f0, $f1, 16, $sp, $t0 # Move double/float from JValue result to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_long_result:
+ lw $v1, 20($sp) # Move upper bits from JValue result to return value register.
+ // Fall-through for lower bits.
+.Lstore_int_result:
+ lw $v0, 16($sp) # Move lower bits from JValue result to return value register.
+ // Fall-through to clean up and return.
+.Lcleanup_and_return:
+ addiu $sp, $sp, 24 # Remove space for JValue result and the 4 words for the callee.
+ .cfi_adjust_cfa_offset -24
+ lw $t7, THREAD_EXCEPTION_OFFSET(rSELF) # Load Thread::Current()->exception_
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ bnez $t7, 1f # Branch to deliver pending exception.
+ nop
+ jalr $zero, $ra
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+END art_quick_invoke_polymorphic
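
For context, the MATCH_RETURN_TYPE sequence above dispatches on the shorty character returned by artInvokePolymorphic and copies the stack-allocated JValue into the native return registers. A rough, hypothetical C++ equivalent of that dispatch (the union layout and names are illustrative, not ART's own types):

    #include <cstdint>

    union JValue32 {          // hypothetical JValue layout for a 32-bit target
      uint8_t z;
      uint16_t c;
      int32_t i;
      int64_t j;
      float f;
      double d;
    };

    struct ReturnRegs {
      int64_t gpr;            // $v0/$v1 on MIPS32
      double fpr;             // $f0
    };

    ReturnRegs CopyInvokeResult(char return_type, const JValue32& result) {
      ReturnRegs out = {0, 0.0};
      switch (return_type) {
        case 'V': break;                                   // void: nothing to copy
        case 'Z': out.gpr = result.z; break;               // boolean, zero-extended
        case 'C': out.gpr = result.c; break;               // char, zero-extended
        case 'B': case 'S': case 'I': case 'L': out.gpr = result.i; break;
        case 'J': out.gpr = result.j; break;               // long
        case 'F': out.fpr = result.f; break;               // float in $f0
        case 'D': out.fpr = result.d; break;               // double in $f0
        default: break;                                    // unexpected: leave zero
      }
      return out;
    }
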
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
index 5606c1d858..5757906618 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64.cc
@@ -70,7 +70,7 @@ uint32_t Mips64InstructionSetFeatures::AsBitmap() const {
}
std::string Mips64InstructionSetFeatures::GetFeatureString() const {
- return "";
+ return "default";
}
std::unique_ptr<const InstructionSetFeatures>
diff --git a/runtime/arch/mips64/instruction_set_features_mips64_test.cc b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
index 1d037947fa..380c4e5433 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64_test.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
@@ -27,7 +27,7 @@ TEST(Mips64InstructionSetFeaturesTest, Mips64Features) {
ASSERT_TRUE(mips64_features.get() != nullptr) << error_msg;
EXPECT_EQ(mips64_features->GetInstructionSet(), kMips64);
EXPECT_TRUE(mips64_features->Equals(mips64_features.get()));
- EXPECT_STREQ("", mips64_features->GetFeatureString().c_str());
+ EXPECT_STREQ("default", mips64_features->GetFeatureString().c_str());
EXPECT_EQ(mips64_features->AsBitmap(), 0U);
}
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 0861d2d73e..ae786fe626 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1775,107 +1775,9 @@ END \name
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_rosalloc
-
- # Fast path rosalloc allocation
- # a0: type_idx
- # a1: ArtMethod*
- # s1: Thread::Current
- # -----------------------------
- # t0: class
- # t1: object size
- # t2: rosalloc run
- # t3: thread stack top offset
- # a4: thread stack bottom offset
- # v0: free list head
- #
- # a5, a6 : temps
-
- ld $t0, ART_METHOD_DEX_CACHE_TYPES_OFFSET_64($a1) # Load dex cache resolved types array.
-
- dsll $a5, $a0, COMPRESSED_REFERENCE_SIZE_SHIFT # Shift the value.
- daddu $a5, $t0, $a5 # Compute the index.
- lwu $t0, 0($a5) # Load class (t0).
- beqzc $t0, .Lart_quick_alloc_object_rosalloc_slow_path
-
- li $a6, MIRROR_CLASS_STATUS_INITIALIZED
- lwu $a5, MIRROR_CLASS_STATUS_OFFSET($t0) # Check class status.
- bnec $a5, $a6, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Add a fake dependence from the following access flag and size loads to the status load. This
- # is to prevent those loads from being reordered above the status load and reading wrong values.
- xor $a5, $a5, $a5
- daddu $t0, $t0, $a5
-
- lwu $a5, MIRROR_CLASS_ACCESS_FLAGS_OFFSET($t0) # Check if access flags has
- li $a6, ACCESS_FLAGS_CLASS_IS_FINALIZABLE # kAccClassIsFinalizable.
- and $a6, $a5, $a6
- bnezc $a6, .Lart_quick_alloc_object_rosalloc_slow_path
-
- ld $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation stack
- ld $a4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # has any room left.
- bgeuc $t3, $a4, .Lart_quick_alloc_object_rosalloc_slow_path
-
- lwu $t1, MIRROR_CLASS_OBJECT_SIZE_OFFSET($t0) # Load object size (t1).
- li $a5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
- # allocation.
- bltuc $a5, $t1, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Compute the rosalloc bracket index from the size. Align the size up to the rosalloc bracket
- # quantum size, divide by the quantum size and subtract 1.
- daddiu $t1, $t1, -1 # Decrease obj size and shift right by
- dsrl $t1, $t1, ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT # quantum.
-
- dsll $t2, $t1, POINTER_SIZE_SHIFT
- daddu $t2, $t2, $s1
- ld $t2, THREAD_ROSALLOC_RUNS_OFFSET($t2) # Load rosalloc run (t2).
-
- # Load the free list head (v0).
- # NOTE: this will be the return val.
- ld $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
- beqzc $v0, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Load the next pointer of the head and update the list head with the next pointer.
- ld $a5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
- sd $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
-
- # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
- # asserted to match.
-
-#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
-#error "Class pointer needs to overwrite next pointer."
-#endif
-
- POISON_HEAP_REF $t0
- sw $t0, MIRROR_OBJECT_CLASS_OFFSET($v0)
-
- # Push the new object onto the thread local allocation stack and increment the thread local
- # allocation stack top.
- sd $v0, 0($t3)
- daddiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
- sd $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
-
- # Decrement the size of the free list.
- lw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
- addiu $a5, $a5, -1
- sw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
-
- sync # Fence.
-
- jalr $zero, $ra
- .cpreturn # Restore gp from t8 in branch delay slot.
-
-.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME
- jal artAllocObjectFromCodeRosAlloc
- move $a2 ,$s1 # Pass self as argument.
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-
-END art_quick_alloc_object_rosalloc
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
/*
* Entry from managed code to resolve a string, this stub will allocate a String and deliver an
@@ -2203,7 +2105,7 @@ ENTRY_NO_GP art_quick_indexof
li $v0,-1 # return -1;
sll $v0,$a2,1 # $a0 += $a2 * 2
- daddu $a0,$a0,$v0 # " " " " "
+ daddu $a0,$a0,$v0 # " ditto "
move $v0,$a2 # Set i to fromIndex.
1:
@@ -2222,4 +2124,65 @@ ENTRY_NO_GP art_quick_indexof
nop
END art_quick_indexof
+.extern artInvokePolymorphic
+ENTRY art_quick_invoke_polymorphic
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
+ move $a2, rSELF # Make $a2 an alias for the current Thread.
+ move $a3, $sp # Make $a3 a pointer to the saved frame context.
+ daddiu $sp, $sp, -8 # Reserve space for JValue result.
+ .cfi_adjust_cfa_offset 8
+ sd $zero, 0($sp) # Initialize JValue result.
+ move $a0, $sp # Make $a0 a pointer to the JValue result
+ jal artInvokePolymorphic # (result, receiver, Thread*, context)
+ nop
+.macro MATCH_RETURN_TYPE c, handler
+ li $t0, \c
+ beq $v0, $t0, \handler
+.endm
+ MATCH_RETURN_TYPE 'V', .Lcleanup_and_return
+ MATCH_RETURN_TYPE 'L', .Lstore_ref_result
+ MATCH_RETURN_TYPE 'I', .Lstore_long_result
+ MATCH_RETURN_TYPE 'J', .Lstore_long_result
+ MATCH_RETURN_TYPE 'B', .Lstore_long_result
+ MATCH_RETURN_TYPE 'C', .Lstore_char_result
+ MATCH_RETURN_TYPE 'D', .Lstore_double_result
+ MATCH_RETURN_TYPE 'F', .Lstore_float_result
+ MATCH_RETURN_TYPE 'S', .Lstore_long_result
+ MATCH_RETURN_TYPE 'Z', .Lstore_boolean_result
+.purgem MATCH_RETURN_TYPE
+ nop
+ b .Lcleanup_and_return
+ nop
+.Lstore_boolean_result:
+ lbu $v0, 0($sp) # Move byte from JValue result to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_char_result:
+ lhu $v0, 0($sp) # Move char from JValue result to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_double_result:
+.Lstore_float_result:
+ l.d $f0, 0($sp) # Move double/float from JValue result to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_ref_result:
+ lwu $v0, 0($sp) # Move zero extended lower 32-bits to return value register.
+ b .Lcleanup_and_return
+ nop
+.Lstore_long_result:
+ ld $v0, 0($sp) # Move long from JValue result to return value register.
+ // Fall-through to clean up and return.
+.Lcleanup_and_return:
+ daddiu $sp, $sp, 8 # Remove space for JValue result.
+ .cfi_adjust_cfa_offset -8
+ ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # Load Thread::Current()->exception_
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ bnez $t0, 1f # Branch to deliver pending exception.
+ nop
+ jalr $zero, $ra
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+END art_quick_invoke_polymorphic
+
.set pop
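
The hand-written RosAlloc fast paths deleted from both MIPS files mapped an object size to a thread-local run by computing a bracket index. A minimal sketch of that arithmetic, assuming a bracket quantum of 1 << kQuantumShift bytes (the shift value here is illustrative, not the real constant):

    #include <cstddef>

    constexpr size_t kQuantumShift = 4;                      // illustrative value only
    constexpr size_t kQuantum = size_t{1} << kQuantumShift;  // bracket granularity

    // (size - 1) >> shift rounds the size up to a quantum multiple and subtracts
    // one in a single step, matching the addiu/srl (daddiu/dsrl) pair above.
    size_t BracketIndex(size_t object_size) {
      return (object_size - 1) >> kQuantumShift;
    }
    // Sizes 1..kQuantum land in bracket 0, kQuantum+1..2*kQuantum in bracket 1, and so on.
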
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index db2fdcabea..abd9046174 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -15,15 +15,13 @@
*/
.macro GENERATE_ALLOC_ENTRYPOINTS c_suffix, cxx_suffix
-// Called by managed code to allocate an object.
-TWO_ARG_DOWNCALL art_quick_alloc_object\c_suffix, artAllocObjectFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an object of a resolved class.
-TWO_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an object of an initialized class.
-TWO_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an object when the caller doesn't know whether it has access
// to the created type.
-TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check\c_suffix, artAllocObjectFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks\c_suffix, artAllocObjectFromCodeWithChecks\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an array.
THREE_ARG_DOWNCALL art_quick_alloc_array\c_suffix, artAllocArrayFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an array of a resolve class.
@@ -61,14 +59,12 @@ GENERATE_ALLOC_ENTRYPOINTS _region_tlab_instrumented, RegionTLABInstrumented
// Generate the allocation entrypoints for each allocator. This is used as an alternative to
// GENERATE_ALL_ALLOC_ENTRYPOINTS for selectively implementing allocation fast paths in
// hand-written assembly.
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object ## c_suffix, artAllocObjectFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check ## c_suffix, artAllocObjectFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks ## c_suffix, artAllocObjectFromCodeWithChecks ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(c_suffix, cxx_suffix) \
THREE_ARG_DOWNCALL art_quick_alloc_array ## c_suffix, artAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \
@@ -93,8 +89,7 @@ GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
@@ -109,8 +104,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_TLAB_ALLOCATOR
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
@@ -129,7 +123,6 @@ GENERATE_ALLOC_ENTRYPOINTS_FOR_TLAB_ALLOCATOR
.endm
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
@@ -142,7 +135,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
@@ -156,8 +148,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc_instrumented, DlMal
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented)
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc, RosAlloc)
@@ -169,7 +160,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
@@ -182,7 +172,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc_instrumented, RosAl
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
@@ -195,7 +184,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
@@ -208,7 +196,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer_instrumented, B
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
@@ -221,7 +208,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab_instrumented, TLABInstr
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region, Region)
@@ -234,7 +220,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
@@ -247,7 +232,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_instrumented, RegionI
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
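
The switch from TWO_ARG_DOWNCALL to ONE_ARG_DOWNCALL in this file reflects that the resolved, initialized, and with-checks object allocators no longer take a dex type index plus a referrer ArtMethod*; compiled code now passes the resolved mirror::Class* directly. Roughly, the runtime-side signatures change along these lines (a sketch only; the exact ART declarations may differ, and the Old/New suffixes are just labels for this example):

    #include <cstdint>

    namespace art {
    class ArtMethod;
    class Thread;
    namespace mirror { class Class; class Object; }
    }  // namespace art

    // Before: the stub passed a dex type index and the calling method as referrer.
    extern "C" art::mirror::Object* artAllocObjectFromCodeResolvedOld(
        uint32_t type_idx, art::ArtMethod* referrer, art::Thread* self);

    // After: the caller resolves (and possibly initializes) the class up front,
    // so the entrypoint only needs the class and the current thread.
    extern "C" art::mirror::Object* artAllocObjectFromCodeResolvedNew(
        art::mirror::Class* klass, art::Thread* self);
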
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 9e385f839f..ee65fa8ab0 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1062,12 +1062,8 @@ TEST_F(StubTest, AllocObject) {
EXPECT_FALSE(self->IsExceptionPending());
{
- // Use an arbitrary method from c to use as referrer
- size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex().index_), // type_idx
- // arbitrary
- reinterpret_cast<size_t>(c->GetVirtualMethod(0, kRuntimePointerSize)),
- 0U,
- StubTest::GetEntrypoint(self, kQuickAllocObject),
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
+ StubTest::GetEntrypoint(self, kQuickAllocObjectWithChecks),
self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1078,8 +1074,6 @@ TEST_F(StubTest, AllocObject) {
}
{
- // We can use null in the second argument as we do not need a method here (not used in
- // resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
self);
@@ -1092,8 +1086,6 @@ TEST_F(StubTest, AllocObject) {
}
{
- // We can use null in the second argument as we do not need a method here (not used in
- // resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
self);
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index c6f4c0346f..1d979d852e 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -468,7 +468,7 @@ TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_string_bounds, artThro
* The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
* of the target Method* in r0 and method->code_ in r1.
*
- * If unsuccessful, the helper will return null/null will bea pending exception in the
+ * If unsuccessful, the helper will return null/null and there will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -956,52 +956,42 @@ END_MACRO
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-DEFINE_FUNCTION art_quick_alloc_object_rosalloc
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
+DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
// Fast path rosalloc allocation.
- // eax: uint32_t type_idx/return value, ecx: ArtMethod*
- // ebx, edx: free
- PUSH edi
- movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
- // Load the class (edx)
- movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
- testl %edx, %edx // Check null class
- jz .Lart_quick_alloc_object_rosalloc_slow_path
-
+ // eax: type/return value
+ // ecx, ebx, edx: free
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
// Check if the thread local allocation
// stack has room
- movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %edi
- cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %edi
- jae .Lart_quick_alloc_object_rosalloc_slow_path
+ movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %ecx
+ cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %ecx
+ jae .Lart_quick_alloc_object_resolved_rosalloc_slow_path
- movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%edx), %edi // Load the object size (edi)
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%eax), %ecx // Load the object size (ecx)
// Check if the size is for a thread
// local allocation. Also does the
// finalizable and initialization check.
- cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %edi
- ja .Lart_quick_alloc_object_rosalloc_slow_path
- shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %edi // Calculate the rosalloc bracket index
+ cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %ecx
+ ja .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %ecx // Calculate the rosalloc bracket index
// from object size.
// Load thread local rosalloc run (ebx)
// Subtract __SIZEOF_POINTER__ to subtract
// one from edi as there is no 0 byte run
// and the size is already aligned.
- movl (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)(%ebx, %edi, __SIZEOF_POINTER__), %ebx
+ movl (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)(%ebx, %ecx, __SIZEOF_POINTER__), %ebx
// Load free_list head (edi),
// this will be the return value.
- movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %edi
- test %edi, %edi
- jz .Lart_quick_alloc_object_rosalloc_slow_path
+ movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %ecx
+ jecxz .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Point of no slow path. Won't go to
- // the slow path from here on. Ok to
- // clobber eax and ecx.
- movl %edi, %eax
+ // the slow path from here on.
// Load the next pointer of the head
// and update head of free list with
// next pointer
- movl ROSALLOC_SLOT_NEXT_OFFSET(%eax), %edi
- movl %edi, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx)
+ movl ROSALLOC_SLOT_NEXT_OFFSET(%ecx), %edx
+ movl %edx, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx)
// Decrement size of free list by 1
decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%ebx)
// Store the class pointer in the
@@ -1011,141 +1001,104 @@ DEFINE_FUNCTION art_quick_alloc_object_rosalloc
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
- POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%eax)
+ POISON_HEAP_REF eax
+ movl %eax, MIRROR_OBJECT_CLASS_OFFSET(%ecx)
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
// Push the new object onto the thread
// local allocation stack and
// increment the thread local
// allocation stack top.
- movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %edi
- movl %eax, (%edi)
- addl LITERAL(COMPRESSED_REFERENCE_SIZE), %edi
- movl %edi, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx)
+ movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %eax
+ movl %ecx, (%eax)
+ addl LITERAL(COMPRESSED_REFERENCE_SIZE), %eax
+ movl %eax, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx)
// No fence needed for x86.
- POP edi
+ movl %ecx, %eax // Move object to return register
ret
-.Lart_quick_alloc_object_rosalloc_slow_path:
- POP edi
+.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH eax // alignment padding
+ subl LITERAL(8), %esp // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx
PUSH eax
- call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*)
+ call SYMBOL(artAllocObjectFromCodeResolvedRosAlloc) // cxx_name(arg0, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
-END_FUNCTION art_quick_alloc_object_rosalloc
+END_FUNCTION art_quick_alloc_object_resolved_rosalloc
-// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
+// The common fast path code for art_quick_alloc_object_resolved_tlab
+// and art_quick_alloc_object_resolved_region_tlab.
//
-// EAX: type_idx/return_value, ECX: ArtMethod*, EDX: the class.
-MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel)
- testl %edx, %edx // Check null class
- jz VAR(slowPathLabel)
+// EAX: type/return_value
+MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel)
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
movl THREAD_LOCAL_END_OFFSET(%ebx), %edi // Load thread_local_end.
subl THREAD_LOCAL_POS_OFFSET(%ebx), %edi // Compute the remaining buffer size.
- movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%edx), %esi // Load the object size.
- cmpl %edi, %esi // Check if it fits.
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%eax), %ecx // Load the object size.
+ cmpl %edi, %ecx // Check if it fits.
ja VAR(slowPathLabel)
- movl THREAD_LOCAL_POS_OFFSET(%ebx), %eax // Load thread_local_pos
+ movl THREAD_LOCAL_POS_OFFSET(%ebx), %edx // Load thread_local_pos
// as allocated object.
- addl %eax, %esi // Add the object size.
- movl %esi, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos.
+ addl %edx, %ecx // Add the object size.
+ movl %ecx, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos.
incl THREAD_LOCAL_OBJECTS_OFFSET(%ebx) // Increase thread_local_objects.
// Store the class pointer in the header.
// No fence needed for x86.
- POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%eax)
+ POISON_HEAP_REF eax
+ movl %eax, MIRROR_OBJECT_CLASS_OFFSET(%edx)
+ movl %edx, %eax
POP edi
- POP esi
ret // Fast path succeeded.
END_MACRO
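
ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH above is a plain bump-pointer allocation against the thread-local buffer. A minimal hypothetical C++ rendering of the same check-and-bump logic (field names are illustrative):

    #include <cstddef>
    #include <cstdint>

    struct ThreadTlab {
      uint8_t* pos;      // next free byte in the TLAB
      uint8_t* end;      // end of the TLAB
      size_t objects;    // number of objects allocated from the TLAB
    };

    void* TlabAllocFast(ThreadTlab* tlab, size_t object_size) {
      if (object_size > static_cast<size_t>(tlab->end - tlab->pos)) {
        return nullptr;  // does not fit: take the slow path
      }
      void* obj = tlab->pos;     // the allocation result is the old position
      tlab->pos += object_size;  // bump the position
      ++tlab->objects;
      return obj;                // caller stores the class pointer into the header
    }
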
-// The common slow path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
-MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name)
+// The common slow path code for art_quick_alloc_object_resolved_tlab
+// and art_quick_alloc_object_resolved_region_tlab.
+MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH, cxx_name)
POP edi
- POP esi
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH eax // alignment padding
+ subl LITERAL(8), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx
PUSH eax
- call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
addl LITERAL(16), %esp
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). May be called
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be called
// for CC if the GC is not marking.
-DEFINE_FUNCTION art_quick_alloc_object_tlab
+DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab
// Fast path tlab allocation.
- // EAX: uint32_t type_idx/return value, ECX: ArtMethod*.
- // EBX, EDX: free.
- PUSH esi
+ // EAX: type
+ // EBX, ECX, EDX: free.
PUSH edi
- movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
- // Might need to break down into multiple instructions to get the base address in a register.
- // Load the class
- movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
-.Lart_quick_alloc_object_tlab_slow_path:
- ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeTLAB
-END_FUNCTION art_quick_alloc_object_tlab
-
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB).
-DEFINE_FUNCTION art_quick_alloc_object_region_tlab
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
+.Lart_quick_alloc_object_resolved_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedTLAB
+END_FUNCTION art_quick_alloc_object_resolved_tlab
+
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB).
+DEFINE_FUNCTION art_quick_alloc_object_resolved_region_tlab
// Fast path region tlab allocation.
- // EAX: uint32_t type_idx/return value, ECX: ArtMethod*.
- // EBX, EDX: free.
+ // EAX: type/return value
+ // EBX, ECX, EDX: free.
#if !defined(USE_READ_BARRIER)
int3
int3
#endif
- PUSH esi
PUSH edi
- movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
- // Might need to break down into multiple instructions to get the base address in a register.
- // Load the class
- movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
- // Read barrier for class load.
- cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
- jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
- // Null check so that we can load the lock word.
- testl %edx, %edx
- jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
- // Check the mark bit, if it is 1 return.
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
- jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark the class.
- PUSH eax
- PUSH ecx
- // Outgoing argument set up
- subl MACRO_LITERAL(8), %esp // Alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- PUSH edx // Pass the class as the first param.
- call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
- movl %eax, %edx
- addl MACRO_LITERAL(12), %esp
- CFI_ADJUST_CFA_OFFSET(-12)
- POP ecx
- POP eax
- jmp .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_slow_path:
- ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeRegionTLAB
-END_FUNCTION art_quick_alloc_object_region_tlab
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
+.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB
+END_FUNCTION art_quick_alloc_object_resolved_region_tlab
+
DEFINE_FUNCTION art_quick_resolve_string
SETUP_SAVE_EVERYTHING_FRAME ebx, ebx
@@ -2270,5 +2223,99 @@ DEFINE_FUNCTION art_quick_osr_stub
jmp *%ebx
END_FUNCTION art_quick_osr_stub
+DEFINE_FUNCTION art_quick_invoke_polymorphic
+ SETUP_SAVE_REFS_AND_ARGS_FRAME ebx, ebx // Save frame.
+ mov %esp, %edx // Remember SP.
+ subl LITERAL(16), %esp // Make space for JValue result.
+ CFI_ADJUST_CFA_OFFSET(16)
+ movl LITERAL(0), (%esp) // Initialize result to zero.
+ movl LITERAL(0), 4(%esp)
+ mov %esp, %eax // Store pointer to JValue result in eax.
+ PUSH edx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH ecx // pass receiver (method handle)
+ PUSH eax // pass JResult
+ call SYMBOL(artInvokePolymorphic) // (result, receiver, Thread*, SP)
+ subl LITERAL('A'), %eax // Convert type descriptor character value to a zero based index.
+ cmpb LITERAL('Z' - 'A'), %al
+ ja .Lcleanup_and_return
+ movzbl %al, %eax
+ call .Lput_eip_in_ecx
+.Lbranch_start:
+ movl %ecx, %edx
+ add $(.Lhandler_table - .Lbranch_start), %edx // Make EDX point to handler_table.
+ leal (%edx, %eax, 2), %eax // Calculate address of entry in table.
+ movzwl (%eax), %eax // Lookup relative branch in table.
+ addl %ecx, %eax // Add EIP relative offset.
+ jmp *%eax // Branch to handler.
+
+ // Handlers for different return types.
+.Lstore_boolean_result:
+ movzbl 16(%esp), %eax // Copy boolean result to the accumulator.
+ jmp .Lcleanup_and_return
+.Lstore_char_result:
+ movzwl 16(%esp), %eax // Copy char result to the accumulator.
+ jmp .Lcleanup_and_return
+.Lstore_float_result:
+ movd 16(%esp), %xmm0 // Copy float result to the context restored by
+ movd %xmm0, 36(%esp) // RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ jmp .Lcleanup_and_return
+.Lstore_double_result:
+ movsd 16(%esp), %xmm0 // Copy double result to the context restored by
+ movsd %xmm0, 36(%esp) // RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ jmp .Lcleanup_and_return
+.Lstore_long_result:
+ movl 20(%esp), %edx // Copy upper-word of result to the context restored by
+ movl %edx, 72(%esp) // RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ // Fall-through for lower bits.
+.Lstore_int_result:
+ movl 16(%esp), %eax // Copy int result to the accumulator.
+ // Fall-through to clean up and return.
+.Lcleanup_and_return:
+ addl LITERAL(32), %esp // Pop arguments and stack allocated JValue result.
+ CFI_ADJUST_CFA_OFFSET(-32)
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ RETURN_OR_DELIVER_PENDING_EXCEPTION
+
+.Lput_eip_in_ecx: // Internal function that puts address of
+ movl 0(%esp), %ecx // next instruction into ECX when CALL
+ ret
+
+ // Handler table to handlers for given type.
+.Lhandler_table:
+MACRO1(HANDLER_TABLE_ENTRY, handler_label)
+ // NB some tools require 16-bits for relocations. Shouldn't need adjusting.
+ .word RAW_VAR(handler_label) - .Lbranch_start
+END_MACRO
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // A
+ HANDLER_TABLE_ENTRY(.Lstore_int_result) // B (byte)
+ HANDLER_TABLE_ENTRY(.Lstore_char_result) // C (char)
+ HANDLER_TABLE_ENTRY(.Lstore_double_result) // D (double)
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // E
+ HANDLER_TABLE_ENTRY(.Lstore_float_result) // F (float)
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // G
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // H
+ HANDLER_TABLE_ENTRY(.Lstore_int_result) // I (int)
+ HANDLER_TABLE_ENTRY(.Lstore_long_result) // J (long)
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // K
+ HANDLER_TABLE_ENTRY(.Lstore_int_result) // L (object)
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // M
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // N
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // O
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // P
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // Q
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // R
+ HANDLER_TABLE_ENTRY(.Lstore_int_result) // S (short)
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // T
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // U
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // V (void)
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // W
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // X
+ HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // Y
+ HANDLER_TABLE_ENTRY(.Lstore_boolean_result) // Z (boolean)
+
+END_FUNCTION art_quick_invoke_polymorphic
+
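+
Because 32-bit x86 has no RIP-relative addressing, the stub above materializes EIP with the call to .Lput_eip_in_ecx and then indexes a table of 16-bit offsets taken relative to .Lbranch_start. Conceptually (a hypothetical C++ sketch of that dispatch, not how the assembler emits it):

    #include <cstdint>

    using Handler = void (*)();

    // table[i] holds (handler_address - anchor), so adding the anchor back at run
    // time reconstructs an absolute, position-independent handler address.
    void Dispatch(const uint8_t* anchor, const uint16_t* table, unsigned index) {
      const uint8_t* handler = anchor + table[index];
      reinterpret_cast<Handler>(reinterpret_cast<uintptr_t>(handler))();
    }
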
// TODO: implement these!
UNIMPLEMENTED art_quick_memcmp16
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 4c46b08a9e..28034c9bae 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -983,7 +983,6 @@ GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
// Comment out allocators that have x86_64 specific asm.
// Region TLAB:
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
@@ -996,11 +995,9 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
// Normal TLAB:
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
@@ -1009,29 +1006,25 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-DEFINE_FUNCTION art_quick_alloc_object_rosalloc
+
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
+DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
// Fast path rosalloc allocation.
- // RDI: type_idx, RSI: ArtMethod*, RAX: return value
- // RDX, RCX, R8, R9: free.
- movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
- // Load the class (edx)
- movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
- testl %edx, %edx // Check null class
- jz .Lart_quick_alloc_object_rosalloc_slow_path
+ // RDI: mirror::Class*, RAX: return value
+ // RSI, RDX, RCX, R8, R9: free.
// Check if the thread local
// allocation stack has room.
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
movq THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%r8), %rcx // rcx = alloc stack top.
cmpq THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%r8), %rcx
- jae .Lart_quick_alloc_object_rosalloc_slow_path
+ jae .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Load the object size
- movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdx), %eax
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdi), %eax
// Check if the size is for a thread
// local allocation. Also does the
// initialized and finalizable checks.
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %eax
- ja .Lart_quick_alloc_object_rosalloc_slow_path
+ ja .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size.
shrq LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %rax
@@ -1045,7 +1038,7 @@ DEFINE_FUNCTION art_quick_alloc_object_rosalloc
// will be the return val.
movq (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9), %rax
testq %rax, %rax
- jz .Lart_quick_alloc_object_rosalloc_slow_path
+ jz .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber rdi and rsi.
// Push the new object onto the thread
// local allocation stack and
@@ -1066,17 +1059,17 @@ DEFINE_FUNCTION art_quick_alloc_object_rosalloc
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
- POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
+ POISON_HEAP_REF edi
+ movl %edi, MIRROR_OBJECT_CLASS_OFFSET(%rax)
// Decrement the size of the free list
decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%r9)
// No fence necessary for x86.
ret
-.Lart_quick_alloc_object_rosalloc_slow_path:
+.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*)
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call SYMBOL(artAllocObjectFromCodeResolvedRosAlloc) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
-END_FUNCTION art_quick_alloc_object_rosalloc
+END_FUNCTION art_quick_alloc_object_resolved_rosalloc
@@ -1095,19 +1088,19 @@ END_MACRO
// TODO: delete ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH since it is the same as
// ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH.
//
-// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
-// RCX: scratch, r8: Thread::Current().
+// RDI: the class, RAX: return value.
+// RCX, RSI, RDX: scratch, r8: Thread::Current().
MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel)
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH(RAW_VAR(slowPathLabel))
END_MACRO
// The fast path code for art_quick_alloc_object_initialized_region_tlab.
//
-// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
-// RCX: scratch, r8: Thread::Current().
+// RDI: the class, RAX: return value.
+// RCX, RSI, RDX: scratch, r8: Thread::Current().
MACRO1(ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH, slowPathLabel)
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
- movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdx), %ecx // Load the object size.
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdi), %ecx // Load the object size.
movq THREAD_LOCAL_POS_OFFSET(%r8), %rax
addq %rax, %rcx // Add size to pos, note that these
// are both 32 bit ints, overflow
@@ -1120,8 +1113,8 @@ MACRO1(ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH, slowPathLabel)
// Store the class pointer in the
// header.
// No fence needed for x86.
- POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
+ POISON_HEAP_REF edi
+ movl %edi, MIRROR_OBJECT_CLASS_OFFSET(%rax)
ret // Fast path succeeded.
END_MACRO
@@ -1164,12 +1157,14 @@ MACRO1(ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED, slowPathLabel)
ret // Fast path succeeded.
END_MACRO
-// The common slow path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
+
+// The common slow path code for art_quick_alloc_object_{resolved, initialized}_tlab
+// and art_quick_alloc_object_{resolved, initialized}_region_tlab.
MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name)
SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
@@ -1184,26 +1179,11 @@ MACRO1(ALLOC_ARRAY_TLAB_SLOW_PATH, cxx_name)
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). May be
-// called with CC if the GC is not active.
-DEFINE_FUNCTION art_quick_alloc_object_tlab
- // RDI: uint32_t type_idx, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
- movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
- // Might need to break down into multiple instructions to get the base address in a register.
- // Load the class
- movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
-.Lart_quick_alloc_object_tlab_slow_path:
- ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeTLAB
-END_FUNCTION art_quick_alloc_object_tlab
-
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be
// called with CC if the GC is not active.
DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab
- // RDI: mirror::Class* klass, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
- movq %rdi, %rdx
+ // RDI: mirror::Class* klass
+ // RDX, RSI, RCX, R8, R9: free. RAX: return val.
ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
.Lart_quick_alloc_object_resolved_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedTLAB
@@ -1212,9 +1192,8 @@ END_FUNCTION art_quick_alloc_object_resolved_tlab
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB).
// May be called with CC if the GC is not active.
DEFINE_FUNCTION art_quick_alloc_object_initialized_tlab
- // RDI: mirror::Class* klass, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
- movq %rdi, %rdx
+ // RDI: mirror::Class* klass
+ // RDX, RSI, RCX, R8, R9: free. RAX: return val.
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH .Lart_quick_alloc_object_initialized_tlab_slow_path
.Lart_quick_alloc_object_initialized_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeInitializedTLAB
@@ -1292,49 +1271,12 @@ DEFINE_FUNCTION art_quick_alloc_array_resolved_region_tlab
ALLOC_ARRAY_TLAB_SLOW_PATH artAllocArrayFromCodeResolvedRegionTLAB
END_FUNCTION art_quick_alloc_array_resolved_region_tlab
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB).
-DEFINE_FUNCTION art_quick_alloc_object_region_tlab
- // Fast path region tlab allocation.
- // RDI: uint32_t type_idx, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
- ASSERT_USE_READ_BARRIER
- movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
- movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx // Load the class
- // Null check so that we can load the lock word.
- testl %edx, %edx
- jz .Lart_quick_alloc_object_region_tlab_slow_path
- // Since we have allocation entrypoint switching, we know the GC is marking.
- // Check the mark bit, if it is 0, do the read barrier mark.
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
- jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
- // Use resolved one since we already did the null check.
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark the class.
- PUSH rdi
- PUSH rsi
- subq LITERAL(8), %rsp // 16 byte alignment
- // Outgoing argument set up
- movq %rdx, %rdi // Pass the class as the first param.
- call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
- movq %rax, %rdx
- addq LITERAL(8), %rsp
- POP rsi
- POP rdi
- jmp .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_slow_path:
- ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeRegionTLAB
-END_FUNCTION art_quick_alloc_object_region_tlab
-
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB).
DEFINE_FUNCTION art_quick_alloc_object_resolved_region_tlab
// Fast path region tlab allocation.
- // RDI: mirror::Class* klass, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
+ // RDI: mirror::Class* klass
+ // RDX, RSI, RCX, R8, R9: free. RAX: return val.
ASSERT_USE_READ_BARRIER
- // No read barrier since the caller is responsible for that.
- movq %rdi, %rdx
ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB
@@ -1343,10 +1285,9 @@ END_FUNCTION art_quick_alloc_object_resolved_region_tlab
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB).
DEFINE_FUNCTION art_quick_alloc_object_initialized_region_tlab
// Fast path region tlab allocation.
- // RDI: mirror::Class* klass, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
+ // RDI: mirror::Class* klass
+ // RDX, RSI, RCX, R8, R9: free. RAX: return val.
ASSERT_USE_READ_BARRIER
- movq %rdi, %rdx
// No read barrier since the caller is responsible for that.
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH .Lart_quick_alloc_object_initialized_region_tlab_slow_path
.Lart_quick_alloc_object_initialized_region_tlab_slow_path:
@@ -2453,3 +2394,79 @@ DEFINE_FUNCTION art_quick_osr_stub
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
jmp *%rdx
END_FUNCTION art_quick_osr_stub
+
+DEFINE_FUNCTION art_quick_invoke_polymorphic
+ SETUP_SAVE_REFS_AND_ARGS_FRAME // save callee saves
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread
+ movq %rsp, %rcx // pass SP
+ subq LITERAL(16), %rsp // make space for JValue result
+ CFI_ADJUST_CFA_OFFSET(16)
+ movq LITERAL(0), (%rsp) // initialize result
+ movq %rsp, %rdi // store pointer to JValue result
+ call SYMBOL(artInvokePolymorphic) // artInvokePolymorphic(result, receiver, Thread*, SP)
+ // save the code pointer
+ subq LITERAL('A'), %rax // Convert type descriptor character value to a zero based index.
+ cmpb LITERAL('Z' - 'A'), %al // Eliminate out of bounds options
+ ja .Lcleanup_and_return
+ movzbq %al, %rax
+ leaq .Lhandler_table(%rip), %rcx // Get the address of the handler table
+ movslq (%rcx, %rax, 4), %rax // Lookup handler offset relative to table
+ addq %rcx, %rax // Add table address to yield handler address.
+ jmpq *%rax // Jump to handler.
+
+.align 4
+.Lhandler_table: // Table of type descriptor to handlers.
+MACRO1(HANDLER_TABLE_OFFSET, handle_label)
+ // NB some tools require 32-bits for relocations. Shouldn't need adjusting.
+ .long RAW_VAR(handle_label) - .Lhandler_table
+END_MACRO
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // A
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // B (byte)
+ HANDLER_TABLE_OFFSET(.Lstore_char_result) // C (char)
+ HANDLER_TABLE_OFFSET(.Lstore_double_result) // D (double)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // E
+ HANDLER_TABLE_OFFSET(.Lstore_float_result) // F (float)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // G
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // H
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // I (int)
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // J (long)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // K
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // L (object - references are compressed and only 32-bits)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // M
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // N
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // O
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // P
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Q
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // R
+ HANDLER_TABLE_OFFSET(.Lstore_long_result) // S (short)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // T
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // U
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // V (void)
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // W
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // X
+ HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Y
+ HANDLER_TABLE_OFFSET(.Lstore_boolean_result) // Z (boolean)
+
+.Lstore_boolean_result:
+ movzbq (%rsp), %rax // Copy boolean result to the accumulator
+ jmp .Lcleanup_and_return
+.Lstore_char_result:
+ movzwq (%rsp), %rax // Copy char result to the accumulator
+ jmp .Lcleanup_and_return
+.Lstore_float_result:
+ movd (%rsp), %xmm0 // Copy float result to the context restored by
+ movd %xmm0, 32(%rsp) // RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ jmp .Lcleanup_and_return
+.Lstore_double_result:
+ movsd (%rsp), %xmm0 // Copy double result to the context restored by
+ movsd %xmm0, 32(%rsp) // RESTORE_SAVE_REFS_AND_ARGS_FRAME.
+ jmp .Lcleanup_and_return
+.Lstore_long_result:
+ movq (%rsp), %rax // Copy long result to the accumulator.
+ // Fall-through
+.Lcleanup_and_return:
+ addq LITERAL(16), %rsp // Pop space for JValue result.
+ CFI_ADJUST_CFA_OFFSET(16)
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ RETURN_OR_DELIVER_PENDING_EXCEPTION
+END_FUNCTION art_quick_invoke_polymorphic
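The jump table above dispatches on the shorty character that artInvokePolymorphic returns in RAX.
A minimal C++ sketch of the same mapping, for orientation only (ShortyResultKind and ClassifyShorty
are hypothetical names, not part of this patch; the real dispatch stays in assembly so the result
can be placed in RAX/XMM0 before RESTORE_SAVE_REFS_AND_ARGS_FRAME):

    // Sketch: which register class and extension each return-type descriptor selects.
    enum class ShortyResultKind { kNone, kZeroExtend8, kZeroExtend16, kCopy64, kFloat, kDouble };

    ShortyResultKind ClassifyShorty(char shorty) {
      switch (shorty) {
        case 'Z': return ShortyResultKind::kZeroExtend8;   // boolean: movzbq into RAX
        case 'C': return ShortyResultKind::kZeroExtend16;  // char: movzwq into RAX
        case 'F': return ShortyResultKind::kFloat;          // float: low 32 bits of XMM0
        case 'D': return ShortyResultKind::kDouble;         // double: XMM0
        case 'B': case 'S': case 'I': case 'J': case 'L':
          return ShortyResultKind::kCopy64;                  // byte/short/int/long/reference: movq into RAX
        default:
          return ShortyResultKind::kNone;                    // 'V' and out-of-range descriptors: nothing stored
      }
    }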
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 8220ba4910..448b4600cc 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -351,7 +351,7 @@ ClassLinker::ClassLinker(InternTable* intern_table)
array_iftable_(nullptr),
find_array_class_cache_next_victim_(0),
init_done_(false),
- log_new_class_table_roots_(false),
+ log_new_roots_(false),
intern_table_(intern_table),
quick_resolution_trampoline_(nullptr),
quick_imt_conflict_trampoline_(nullptr),
@@ -1865,12 +1865,10 @@ bool ClassLinker::AddImageSpace(
<< reinterpret_cast<const void*>(section_end);
}
}
- if (!oat_file->GetBssGcRoots().empty()) {
- // Insert oat file to class table for visiting .bss GC roots.
- class_table->InsertOatFile(oat_file);
- }
- } else {
- DCHECK(oat_file->GetBssGcRoots().empty());
+ }
+ if (!oat_file->GetBssGcRoots().empty()) {
+ // Insert oat file to class table for visiting .bss GC roots.
+ class_table->InsertOatFile(oat_file);
}
if (added_class_table) {
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -1934,14 +1932,27 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
// Concurrent moving GC marked new roots through the to-space invariant.
CHECK_EQ(new_ref, old_ref);
}
+ for (const OatFile* oat_file : new_bss_roots_boot_oat_files_) {
+ for (GcRoot<mirror::Object>& root : oat_file->GetBssGcRoots()) {
+ ObjPtr<mirror::Object> old_ref = root.Read<kWithoutReadBarrier>();
+ if (old_ref != nullptr) {
+ DCHECK(old_ref->IsClass());
+ root.VisitRoot(visitor, RootInfo(kRootStickyClass));
+ ObjPtr<mirror::Object> new_ref = root.Read<kWithoutReadBarrier>();
+ // Concurrent moving GC marked new roots through the to-space invariant.
+ CHECK_EQ(new_ref, old_ref);
+ }
+ }
+ }
}
if ((flags & kVisitRootFlagClearRootLog) != 0) {
new_class_roots_.clear();
+ new_bss_roots_boot_oat_files_.clear();
}
if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
- log_new_class_table_roots_ = true;
+ log_new_roots_ = true;
} else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
- log_new_class_table_roots_ = false;
+ log_new_roots_ = false;
}
// We deliberately ignore the class roots in the image since we
// handle image roots by using the MS/CMS rescanning of dirty cards.
@@ -3303,6 +3314,7 @@ mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file,
ReaderMutexLock mu(self, *Locks::dex_lock_);
ObjPtr<mirror::DexCache> dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
+ // TODO: Check if the dex file was registered with the same class loader. Bug: 34193123
return dex_cache.Ptr();
}
}
@@ -3647,7 +3659,7 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, ObjPtr<mirror::C
// This is necessary because we need to have the card dirtied for remembered sets.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
}
- if (log_new_class_table_roots_) {
+ if (log_new_roots_) {
new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
}
}
@@ -3660,6 +3672,14 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, ObjPtr<mirror::C
return nullptr;
}
+void ClassLinker::WriteBarrierForBootOatFileBssRoots(const OatFile* oat_file) {
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ DCHECK(!oat_file->GetBssGcRoots().empty()) << oat_file->GetLocation();
+ if (log_new_roots_ && !ContainsElement(new_bss_roots_boot_oat_files_, oat_file)) {
+ new_bss_roots_boot_oat_files_.push_back(oat_file);
+ }
+}
+
// TODO This should really be in mirror::Class.
void ClassLinker::UpdateClassMethods(ObjPtr<mirror::Class> klass,
LengthPrefixedArray<ArtMethod>* new_methods) {
@@ -5157,7 +5177,7 @@ bool ClassLinker::LinkClass(Thread* self,
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
}
CHECK_EQ(existing, klass.Get());
- if (log_new_class_table_roots_) {
+ if (log_new_roots_) {
new_class_roots_.push_back(GcRoot<mirror::Class>(h_new_class.Get()));
}
}
@@ -6929,7 +6949,7 @@ void ClassLinker::LinkInterfaceMethodsHelper::ReallocMethods() {
method_alignment_);
const size_t old_methods_ptr_size = (old_methods != nullptr) ? old_size : 0;
auto* methods = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
- class_linker_->GetAllocatorForClassLoader(klass_->GetClassLoader())->Realloc(
+ Runtime::Current()->GetLinearAlloc()->Realloc(
self_, old_methods, old_methods_ptr_size, new_size));
CHECK(methods != nullptr); // Native allocation failure aborts.
@@ -6949,13 +6969,19 @@ void ClassLinker::LinkInterfaceMethodsHelper::ReallocMethods() {
StrideIterator<ArtMethod> out(methods->begin(method_size_, method_alignment_) + old_method_count);
// Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and
// we want the roots of the miranda methods to get visited.
- for (ArtMethod* mir_method : miranda_methods_) {
+ for (size_t i = 0; i < miranda_methods_.size(); ++i) {
+ ArtMethod* mir_method = miranda_methods_[i];
ArtMethod& new_method = *out;
new_method.CopyFrom(mir_method, pointer_size);
new_method.SetAccessFlags(new_method.GetAccessFlags() | kAccMiranda | kAccCopied);
DCHECK_NE(new_method.GetAccessFlags() & kAccAbstract, 0u)
<< "Miranda method should be abstract!";
move_table_.emplace(mir_method, &new_method);
+ // Update the entry in the method array, as the array will be used for future lookups,
+ // where thread suspension is allowed.
+ // As such, the array should not contain locally allocated ArtMethod, otherwise the GC
+ // would not see them.
+ miranda_methods_[i] = &new_method;
++out;
}
// We need to copy the default methods into our own method table since the runtime requires that
@@ -6964,9 +6990,10 @@ void ClassLinker::LinkInterfaceMethodsHelper::ReallocMethods() {
// interface but will have different ArtMethod*s for them. This also means we cannot compare a
// default method found on a class with one found on the declaring interface directly and must
// look at the declaring class to determine if they are the same.
- for (const ScopedArenaVector<ArtMethod*>& methods_vec : {default_methods_,
- overriding_default_methods_}) {
- for (ArtMethod* def_method : methods_vec) {
+ for (ScopedArenaVector<ArtMethod*>* methods_vec : {&default_methods_,
+ &overriding_default_methods_}) {
+ for (size_t i = 0; i < methods_vec->size(); ++i) {
+ ArtMethod* def_method = (*methods_vec)[i];
ArtMethod& new_method = *out;
new_method.CopyFrom(def_method, pointer_size);
// Clear the kAccSkipAccessChecks flag if it is present. Since this class hasn't been
@@ -6977,12 +7004,18 @@ void ClassLinker::LinkInterfaceMethodsHelper::ReallocMethods() {
constexpr uint32_t kMaskFlags = ~kAccSkipAccessChecks;
new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
move_table_.emplace(def_method, &new_method);
+ // Update the entry in the method array, as the array will be used for future lookups,
+ // where thread suspension is allowed.
+ // As such, the array should not contain locally allocated ArtMethod, otherwise the GC
+ // would not see them.
+ (*methods_vec)[i] = &new_method;
++out;
}
}
- for (const ScopedArenaVector<ArtMethod*>& methods_vec : {default_conflict_methods_,
- overriding_default_conflict_methods_}) {
- for (ArtMethod* conf_method : methods_vec) {
+ for (ScopedArenaVector<ArtMethod*>* methods_vec : {&default_conflict_methods_,
+ &overriding_default_conflict_methods_}) {
+ for (size_t i = 0; i < methods_vec->size(); ++i) {
+ ArtMethod* conf_method = (*methods_vec)[i];
ArtMethod& new_method = *out;
new_method.CopyFrom(conf_method, pointer_size);
// This is a type of default method (there are default method impls, just a conflict) so
@@ -6998,6 +7031,11 @@ void ClassLinker::LinkInterfaceMethodsHelper::ReallocMethods() {
// that the compiler will not invoke the implementation of whatever method we copied from.
EnsureThrowsInvocationError(class_linker_, &new_method);
move_table_.emplace(conf_method, &new_method);
+ // Update the entry in the method array, as the array will be used for future lookups,
+ // where thread suspension is allowed.
+ // As such, the array should not contain locally allocated ArtMethod, otherwise the GC
+ // would not see them.
+ (*methods_vec)[i] = &new_method;
++out;
}
}
@@ -7030,12 +7068,7 @@ ObjPtr<mirror::PointerArray> ClassLinker::LinkInterfaceMethodsHelper::UpdateVtab
default_conflict_methods_,
miranda_methods_}) {
// These are the functions that are not already in the vtable!
- for (ArtMethod* new_method : methods_vec) {
- auto translated_method_it = move_table_.find(new_method);
- CHECK(translated_method_it != move_table_.end())
- << "We must have a translation for methods added to the classes methods_ array! We "
- << "could not find the ArtMethod added for " << ArtMethod::PrettyMethod(new_method);
- ArtMethod* new_vtable_method = translated_method_it->second;
+ for (ArtMethod* new_vtable_method : methods_vec) {
// Leave the declaring class alone the method's dex_code_item_offset_ and dex_method_index_
// fields are references into the dex file the method was defined in. Since the ArtMethod
// does not store that information it uses declaring_class_->dex_cache_.
@@ -7052,7 +7085,6 @@ ObjPtr<mirror::PointerArray> ClassLinker::LinkInterfaceMethodsHelper::UpdateVtab
ArtMethod* translated_method = vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size);
// Try and find what we need to change this method to.
auto translation_it = default_translations.find(i);
- bool found_translation = false;
if (translation_it != default_translations.end()) {
if (translation_it->second.IsInConflict()) {
// Find which conflict method we are to use for this method.
@@ -7076,30 +7108,28 @@ ObjPtr<mirror::PointerArray> ClassLinker::LinkInterfaceMethodsHelper::UpdateVtab
// Normal default method (changed from an older default or abstract interface method).
DCHECK(translation_it->second.IsTranslation());
translated_method = translation_it->second.GetTranslation();
+ auto it = move_table_.find(translated_method);
+ DCHECK(it != move_table_.end());
+ translated_method = it->second;
}
- found_translation = true;
+ } else {
+ auto it = move_table_.find(translated_method);
+ translated_method = (it != move_table_.end()) ? it->second : nullptr;
}
- DCHECK(translated_method != nullptr);
- auto it = move_table_.find(translated_method);
- if (it != move_table_.end()) {
- auto* new_method = it->second;
- DCHECK(new_method != nullptr);
+
+ if (translated_method != nullptr) {
// Make sure the new_methods index is set.
- if (new_method->GetMethodIndexDuringLinking() != i) {
+ if (translated_method->GetMethodIndexDuringLinking() != i) {
if (kIsDebugBuild) {
auto* methods = klass_->GetMethodsPtr();
CHECK_LE(reinterpret_cast<uintptr_t>(&*methods->begin(method_size_, method_alignment_)),
- reinterpret_cast<uintptr_t>(new_method));
- CHECK_LT(reinterpret_cast<uintptr_t>(new_method),
+ reinterpret_cast<uintptr_t>(translated_method));
+ CHECK_LT(reinterpret_cast<uintptr_t>(translated_method),
reinterpret_cast<uintptr_t>(&*methods->end(method_size_, method_alignment_)));
}
- new_method->SetMethodIndex(0xFFFF & i);
+ translated_method->SetMethodIndex(0xFFFF & i);
}
- vtable->SetElementPtrSize(i, new_method, pointer_size);
- } else {
- // If it was not going to be updated we wouldn't have put it into the default_translations
- // map.
- CHECK(!found_translation) << "We were asked to update this vtable entry. Must not fail.";
+ vtable->SetElementPtrSize(i, translated_method, pointer_size);
}
}
klass_->SetVTable(vtable.Ptr());
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 77322ede08..580acb7068 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -64,6 +64,7 @@ class ImtConflictTable;
template<typename T> class LengthPrefixedArray;
template<class T> class MutableHandle;
class InternTable;
+class OatFile;
template<class T> class ObjectLock;
class Runtime;
class ScopedObjectAccessAlreadyRunnable;
@@ -535,6 +536,12 @@ class ClassLinker {
REQUIRES(!Locks::classlinker_classes_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Add an oat file with .bss GC roots to be visited again at the end of GC
+ // for collector types that need it.
+ void WriteBarrierForBootOatFileBssRoots(const OatFile* oat_file)
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
mirror::ObjectArray<mirror::Class>* GetClassRoots() REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
DCHECK(class_roots != nullptr);
@@ -1138,6 +1145,10 @@ class ClassLinker {
// New class roots, only used by CMS since the GC needs to mark these in the pause.
std::vector<GcRoot<mirror::Class>> new_class_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
+ // Boot image oat files with new .bss GC roots to be visited in the pause by CMS.
+ std::vector<const OatFile*> new_bss_roots_boot_oat_files_
+ GUARDED_BY(Locks::classlinker_classes_lock_);
+
// Number of times we've searched dex caches for a class. After a certain number of misses we move
// the classes into the class_table_ to avoid dex cache based searches.
Atomic<uint32_t> failed_dex_cache_class_lookups_;
@@ -1155,7 +1166,7 @@ class ClassLinker {
size_t find_array_class_cache_next_victim_;
bool init_done_;
- bool log_new_class_table_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
+ bool log_new_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
InternTable* intern_table_;
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index c30272e114..a44f79e193 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -428,6 +428,8 @@ static bool IsValidImplicitCheck(uintptr_t addr, ArtMethod* method, const Instru
case Instruction::INVOKE_VIRTUAL_RANGE:
case Instruction::INVOKE_INTERFACE:
case Instruction::INVOKE_INTERFACE_RANGE:
+ case Instruction::INVOKE_POLYMORPHIC:
+ case Instruction::INVOKE_POLYMORPHIC_RANGE:
case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
// Without inlining, we could just check that the offset is the class offset.
@@ -551,6 +553,12 @@ void ThrowNullPointerExceptionFromDexPC(bool check_address, uintptr_t addr) {
case Instruction::INVOKE_INTERFACE_RANGE:
ThrowNullPointerExceptionForMethodAccess(instr->VRegB_3rc(), kInterface);
break;
+ case Instruction::INVOKE_POLYMORPHIC:
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_45cc(), kVirtual);
+ break;
+ case Instruction::INVOKE_POLYMORPHIC_RANGE:
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_4rcc(), kVirtual);
+ break;
case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
// Since we replaced the method index, we ask the verifier to tell us which
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 7b8974fa5a..37f3ac92e9 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -358,7 +358,7 @@ std::string Instruction::DumpString(const DexFile* file) const {
}
break;
case k35c: {
- uint32_t arg[5];
+ uint32_t arg[kMaxVarArgRegs];
GetVarArgs(arg);
switch (Opcode()) {
case FILLED_NEW_ARRAY:
@@ -443,8 +443,50 @@ std::string Instruction::DumpString(const DexFile* file) const {
}
break;
}
+ case k45cc: {
+ uint32_t arg[kMaxVarArgRegs];
+ GetVarArgs(arg);
+ uint32_t method_idx = VRegB_45cc();
+ uint32_t proto_idx = VRegH_45cc();
+ os << opcode << " {";
+ for (int i = 0; i < VRegA_45cc(); ++i) {
+ if (i != 0) {
+ os << ", ";
+ }
+ os << "v" << arg[i];
+ }
+ os << "}";
+ if (file != nullptr) {
+ os << ", " << file->PrettyMethod(method_idx) << ", " << file->GetShorty(proto_idx)
+ << " // ";
+ } else {
+ os << ", ";
+ }
+ os << "method@" << method_idx << ", proto@" << proto_idx;
+ break;
+ }
+ case k4rcc:
+ switch (Opcode()) {
+ case INVOKE_POLYMORPHIC_RANGE: {
+ if (file != nullptr) {
+ uint32_t method_idx = VRegB_4rcc();
+ uint32_t proto_idx = VRegH_4rcc();
+ os << opcode << ", {v" << VRegC_4rcc() << " .. v" << (VRegC_4rcc() + VRegA_4rcc())
+ << "}, " << file->PrettyMethod(method_idx) << ", " << file->GetShorty(proto_idx)
+ << " // method@" << method_idx << ", proto@" << proto_idx;
+ break;
+ }
+ }
+ FALLTHROUGH_INTENDED;
+ default: {
+ uint32_t method_idx = VRegB_4rcc();
+ uint32_t proto_idx = VRegH_4rcc();
+ os << opcode << ", {v" << VRegC_4rcc() << " .. v" << (VRegC_4rcc() + VRegA_4rcc())
+ << "}, method@" << method_idx << ", proto@" << proto_idx;
+ }
+ }
+ break;
case k51l: os << StringPrintf("%s v%d, #%+" PRId64, opcode, VRegA_51l(), VRegB_51l()); break;
- default: os << " unknown format (" << DumpHex(5) << ")"; break;
}
return os.str();
}
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 72dad0cb32..7d6f866617 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -107,48 +107,21 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
return inlined_method;
}
-inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type) {
- return GetCalleeSaveMethodCaller(
- self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */);
-}
-
-template <const bool kAccessCheck>
-ALWAYS_INLINE
-inline mirror::Class* CheckObjectAlloc(dex::TypeIndex type_idx,
- ArtMethod* method,
- Thread* self,
- bool* slow_path) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- PointerSize pointer_size = class_linker->GetImagePointerSize();
- mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, pointer_size);
- if (UNLIKELY(klass == nullptr)) {
- klass = class_linker->ResolveType(type_idx, method);
+ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(mirror::Class* klass,
+ Thread* self,
+ bool* slow_path)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_) {
+ if (UNLIKELY(!klass->IsInstantiable())) {
+ self->ThrowNewException("Ljava/lang/InstantiationError;", klass->PrettyDescriptor().c_str());
*slow_path = true;
- if (klass == nullptr) {
- DCHECK(self->IsExceptionPending());
- return nullptr; // Failure
- } else {
- DCHECK(!self->IsExceptionPending());
- }
+ return nullptr; // Failure
}
- if (kAccessCheck) {
- if (UNLIKELY(!klass->IsInstantiable())) {
- self->ThrowNewException("Ljava/lang/InstantiationError;", klass->PrettyDescriptor().c_str());
- *slow_path = true;
- return nullptr; // Failure
- }
- if (UNLIKELY(klass->IsClassClass())) {
- ThrowIllegalAccessError(nullptr, "Class %s is inaccessible",
- klass->PrettyDescriptor().c_str());
- *slow_path = true;
- return nullptr; // Failure
- }
- mirror::Class* referrer = method->GetDeclaringClass();
- if (UNLIKELY(!referrer->CanAccess(klass))) {
- ThrowIllegalAccessErrorClass(referrer, klass);
- *slow_path = true;
- return nullptr; // Failure
- }
+ if (UNLIKELY(klass->IsClassClass())) {
+ ThrowIllegalAccessError(nullptr, "Class %s is inaccessible",
+ klass->PrettyDescriptor().c_str());
+ *slow_path = true;
+ return nullptr; // Failure
}
if (UNLIKELY(!klass->IsInitialized())) {
StackHandleScope<1> hs(self);
@@ -176,7 +149,9 @@ inline mirror::Class* CheckObjectAlloc(dex::TypeIndex type_idx,
ALWAYS_INLINE
inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self,
- bool* slow_path) {
+ bool* slow_path)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_) {
if (UNLIKELY(!klass->IsInitialized())) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> h_class(hs.NewHandle(klass));
@@ -198,18 +173,15 @@ inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
return klass;
}
-// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
-// cannot be resolved, throw an error. If it can, use it to create an instance.
-// When verification/compiler hasn't been able to verify access, optionally perform an access
-// check.
-template <bool kAccessCheck, bool kInstrumented>
+// Allocate an instance of klass. Throws InstantiationError if klass is not instantiable,
+// or IllegalAccessError if klass is j.l.Class. Performs a clinit check too.
+template <bool kInstrumented>
ALWAYS_INLINE
-inline mirror::Object* AllocObjectFromCode(dex::TypeIndex type_idx,
- ArtMethod* method,
+inline mirror::Object* AllocObjectFromCode(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type) {
bool slow_path = false;
- mirror::Class* klass = CheckObjectAlloc<kAccessCheck>(type_idx, method, self, &slow_path);
+ klass = CheckObjectAlloc(klass, self, &slow_path);
if (UNLIKELY(slow_path)) {
if (klass == nullptr) {
return nullptr;
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 5390165ecd..b17e1a8ab1 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -261,11 +261,8 @@ bool FillArrayData(ObjPtr<mirror::Object> obj, const Instruction::ArrayDataPaylo
return true;
}
-ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
- Runtime::CalleeSaveType type,
- bool do_caller_check)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+static inline std::pair<ArtMethod*, uintptr_t> DoGetCalleeSaveMethodOuterCallerAndPc(
+ ArtMethod** sp, Runtime::CalleeSaveType type) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
@@ -275,6 +272,13 @@ ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
(reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
ArtMethod* outer_method = *caller_sp;
+ return std::make_pair(outer_method, caller_pc);
+}
+
+static inline ArtMethod* DoGetCalleeSaveMethodCaller(ArtMethod* outer_method,
+ uintptr_t caller_pc,
+ bool do_caller_check)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* caller = outer_method;
if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
if (outer_method != nullptr) {
@@ -308,8 +312,33 @@ ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
visitor.WalkStack();
caller = visitor.caller;
}
+ return caller;
+}
+ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
+ Runtime::CalleeSaveType type,
+ bool do_caller_check)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+ auto outer_caller_and_pc = DoGetCalleeSaveMethodOuterCallerAndPc(sp, type);
+ ArtMethod* outer_method = outer_caller_and_pc.first;
+ uintptr_t caller_pc = outer_caller_and_pc.second;
+ ArtMethod* caller = DoGetCalleeSaveMethodCaller(outer_method, caller_pc, do_caller_check);
return caller;
}
+CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self,
+ Runtime::CalleeSaveType type) {
+ CallerAndOuterMethod result;
+ ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ auto outer_caller_and_pc = DoGetCalleeSaveMethodOuterCallerAndPc(sp, type);
+ result.outer_method = outer_caller_and_pc.first;
+ uintptr_t caller_pc = outer_caller_and_pc.second;
+ result.caller =
+ DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check */ true);
+ return result;
+}
+
+
} // namespace art
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 7cc136e227..d4cf83c8de 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -45,27 +45,10 @@ class OatQuickMethodHeader;
class ScopedObjectAccessAlreadyRunnable;
class Thread;
-template <const bool kAccessCheck>
-ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(dex::TypeIndex type_idx,
- ArtMethod* method,
- Thread* self,
- bool* slow_path)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Roles::uninterruptible_);
-
-ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
- Thread* self,
- bool* slow_path)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Roles::uninterruptible_);
-
// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
// cannot be resolved, throw an error. If it can, use it to create an instance.
-// When verification/compiler hasn't been able to verify access, optionally perform an access
-// check.
-template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(dex::TypeIndex type_idx,
- ArtMethod* method,
+template <bool kInstrumented>
+ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -218,7 +201,13 @@ ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
bool do_caller_check = false)
REQUIRES_SHARED(Locks::mutator_lock_);
-ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type)
+struct CallerAndOuterMethod {
+ ArtMethod* caller;
+ ArtMethod* outer_method;
+};
+
+CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self,
+ Runtime::CalleeSaveType type)
REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace art
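The caller/outer_method split matters when the compiled frame's method inlined the actual call
site: caller is the method to resolve dex indices against, while outer_method owns the physical
frame and therefore identifies the oat file (and class loader) for the .bss write barrier. The
dexcache entrypoints further down in this patch use the pair roughly like this (snippet copied
from artInitializeStaticStorageFromCode below):

    auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
    ArtMethod* caller = caller_and_outer.caller;         // referrer for type/string resolution
    mirror::Class* result =
        ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, true, false);
    if (LIKELY(result != nullptr)) {
      BssWriteBarrier(caller_and_outer.outer_method);    // barrier keyed on the frame's oat file
    }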
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 82bb8e53c6..2d06508069 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -29,87 +29,58 @@ namespace art {
static constexpr bool kUseTlabFastPath = true;
+template <bool kInitialized,
+ bool kFinalize,
+ bool kInstrumented,
+ gc::AllocatorType allocator_type>
+static ALWAYS_INLINE inline mirror::Object* artAllocObjectFromCode(
+ mirror::Class* klass,
+ Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
+ DCHECK(klass != nullptr);
+ if (kUseTlabFastPath && !kInstrumented && allocator_type == gc::kAllocatorTypeTLAB) {
+ if (kInitialized || klass->IsInitialized()) {
+ if (!kFinalize || !klass->IsFinalizable()) {
+ size_t byte_count = klass->GetObjectSize();
+ byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment);
+ mirror::Object* obj;
+ if (LIKELY(byte_count < self->TlabSize())) {
+ obj = self->AllocTlab(byte_count);
+ DCHECK(obj != nullptr) << "AllocTlab can't fail";
+ obj->SetClass(klass);
+ if (kUseBakerReadBarrier) {
+ obj->AssertReadBarrierState();
+ }
+ QuasiAtomic::ThreadFenceForConstructor();
+ return obj;
+ }
+ }
+ }
+ }
+ if (kInitialized) {
+ return AllocObjectFromCodeInitialized<kInstrumented>(klass, self, allocator_type);
+ } else if (!kFinalize) {
+ return AllocObjectFromCodeResolved<kInstrumented>(klass, self, allocator_type);
+ } else {
+ return AllocObjectFromCode<kInstrumented>(klass, self, allocator_type);
+ }
+}
+
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
-extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
- uint32_t type_idx, ArtMethod* method, Thread* self) \
+extern "C" mirror::Object* artAllocObjectFromCodeWithChecks##suffix##suffix2( \
+ mirror::Class* klass, Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
- ScopedQuickEntrypointChecks sqec(self); \
- if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
- mirror::Class* klass = method->GetDexCacheResolvedType<false>(dex::TypeIndex(type_idx), \
- kRuntimePointerSize); \
- if (LIKELY(klass != nullptr && klass->IsInitialized() && !klass->IsFinalizable())) { \
- size_t byte_count = klass->GetObjectSize(); \
- byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
- mirror::Object* obj; \
- if (LIKELY(byte_count < self->TlabSize())) { \
- obj = self->AllocTlab(byte_count); \
- DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
- obj->SetClass(klass); \
- if (kUseBakerReadBarrier) { \
- obj->AssertReadBarrierState(); \
- } \
- QuasiAtomic::ThreadFenceForConstructor(); \
- return obj; \
- } \
- } \
- } \
- return AllocObjectFromCode<false, instrumented_bool>(dex::TypeIndex(type_idx), \
- method, \
- self, \
- allocator_type); \
+ return artAllocObjectFromCode<false, true, instrumented_bool, allocator_type>(klass, self); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
+ mirror::Class* klass, Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
- ScopedQuickEntrypointChecks sqec(self); \
- if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
- if (LIKELY(klass->IsInitialized())) { \
- size_t byte_count = klass->GetObjectSize(); \
- byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
- mirror::Object* obj; \
- if (LIKELY(byte_count < self->TlabSize())) { \
- obj = self->AllocTlab(byte_count); \
- DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
- obj->SetClass(klass); \
- if (kUseBakerReadBarrier) { \
- obj->AssertReadBarrierState(); \
- } \
- QuasiAtomic::ThreadFenceForConstructor(); \
- return obj; \
- } \
- } \
- } \
- return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \
+ return artAllocObjectFromCode<false, false, instrumented_bool, allocator_type>(klass, self); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- ScopedQuickEntrypointChecks sqec(self); \
- if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
- size_t byte_count = klass->GetObjectSize(); \
- byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
- mirror::Object* obj; \
- if (LIKELY(byte_count < self->TlabSize())) { \
- obj = self->AllocTlab(byte_count); \
- DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
- obj->SetClass(klass); \
- if (kUseBakerReadBarrier) { \
- obj->AssertReadBarrierState(); \
- } \
- QuasiAtomic::ThreadFenceForConstructor(); \
- return obj; \
- } \
- } \
- return AllocObjectFromCodeInitialized<instrumented_bool>(klass, self, allocator_type); \
-} \
-extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
- uint32_t type_idx, ArtMethod* method, Thread* self) \
+ mirror::Class* klass, Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
- ScopedQuickEntrypointChecks sqec(self); \
- return AllocObjectFromCode<true, instrumented_bool>(dex::TypeIndex(type_idx), \
- method, \
- self, \
- allocator_type); \
+ return artAllocObjectFromCode<true, false, instrumented_bool, allocator_type>(klass, self); \
} \
extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
@@ -220,10 +191,9 @@ GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(RegionTLAB, gc::kAllocatorTypeRegionTLAB)
extern "C" void* art_quick_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_array_resolved##suffix(mirror::Class* klass, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass); \
+extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass); \
+extern "C" void* art_quick_alloc_object_with_checks##suffix(mirror::Class* klass); \
extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_string_from_bytes##suffix(void*, int32_t, int32_t, int32_t); \
@@ -233,9 +203,9 @@ extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, int32_t,
extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(mirror::Class* klass, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass); \
+extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass); \
+extern "C" void* art_quick_alloc_object_with_checks##suffix##_instrumented(mirror::Class* klass); \
extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_string_from_bytes##suffix##_instrumented(void*, int32_t, int32_t, int32_t); \
@@ -246,10 +216,9 @@ void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrument
qpoints->pAllocArray = art_quick_alloc_array##suffix##_instrumented; \
qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix##_instrumented; \
qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix##_instrumented; \
- qpoints->pAllocObject = art_quick_alloc_object##suffix##_instrumented; \
qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix##_instrumented; \
qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix##_instrumented; \
- qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix##_instrumented; \
+ qpoints->pAllocObjectWithChecks = art_quick_alloc_object_with_checks##suffix##_instrumented; \
qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix##_instrumented; \
qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented; \
qpoints->pAllocStringFromBytes = art_quick_alloc_string_from_bytes##suffix##_instrumented; \
@@ -259,10 +228,9 @@ void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrument
qpoints->pAllocArray = art_quick_alloc_array##suffix; \
qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix; \
qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix; \
- qpoints->pAllocObject = art_quick_alloc_object##suffix; \
qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix; \
qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix; \
- qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix; \
+ qpoints->pAllocObjectWithChecks = art_quick_alloc_object_with_checks##suffix; \
qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix; \
qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix; \
qpoints->pAllocStringFromBytes = art_quick_alloc_string_from_bytes##suffix; \
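The fast path shared by the object-allocation entrypoints above is a plain TLAB bump-pointer
allocation. A self-contained sketch, decoupled from the ART types (Tlab and AllocTlabFast are
hypothetical stand-ins; the 8-byte alignment is assumed to match
gc::space::BumpPointerSpace::kAlignment on the usual configurations):

    #include <cstddef>
    #include <cstdint>

    struct Tlab {
      uint8_t* pos;  // next free byte in the thread-local buffer
      uint8_t* end;  // one past the last usable byte
    };

    // Returns nullptr when the remaining TLAB space cannot hold the object;
    // that is where the real entrypoints fall back to AllocObjectFromCode*.
    void* AllocTlabFast(Tlab* tlab, size_t byte_count) {
      constexpr size_t kAlignment = 8;
      byte_count = (byte_count + kAlignment - 1) & ~(kAlignment - 1);  // RoundUp
      if (byte_count >= static_cast<size_t>(tlab->end - tlab->pos)) {
        return nullptr;  // mirrors "byte_count < self->TlabSize()" failing in the fast path
      }
      void* obj = tlab->pos;
      tlab->pos += byte_count;
      return obj;  // caller still installs the Class pointer and issues the constructor fence
    }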
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index 64030f36bc..2d0932a0c4 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -109,8 +109,13 @@ extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, v
extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
+
extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+// Invoke polymorphic entrypoint. Return type is dynamic and may be void, a primitive value, or
+// reference return type.
+extern "C" void art_quick_invoke_polymorphic(uint32_t, void*);
+
// Thread entrypoints.
extern "C" void art_quick_test_suspend();
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 78dad94dfe..8ce61c1021 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -106,6 +106,7 @@ void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints)
art_quick_invoke_super_trampoline_with_access_check;
qpoints->pInvokeVirtualTrampolineWithAccessCheck =
art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokePolymorphic = art_quick_invoke_polymorphic;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 5dad43e7fa..5b1b2871c2 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -31,22 +31,56 @@
namespace art {
+static inline void BssWriteBarrier(ArtMethod* outer_method) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // For AOT code, we need a write barrier for the class loader that holds
+ // the GC roots in the .bss.
+ const DexFile* dex_file = outer_method->GetDexFile();
+ if (dex_file != nullptr &&
+ dex_file->GetOatDexFile() != nullptr &&
+ !dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
+ mirror::ClassLoader* class_loader = outer_method->GetClassLoader();
+ if (class_loader != nullptr) {
+ DCHECK(!class_loader->GetClassTable()->InsertOatFile(dex_file->GetOatDexFile()->GetOatFile()))
+ << "Oat file with .bss GC roots was not registered in class table: "
+ << dex_file->GetOatDexFile()->GetOatFile()->GetLocation();
+ // Note that we emit the barrier before the compiled code stores the String or Class
+      // as a GC root. This is OK as there is no suspend point in between.
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+ } else {
+ Runtime::Current()->GetClassLinker()->WriteBarrierForBootOatFileBssRoots(
+ dex_file->GetOatDexFile()->GetOatFile());
+ }
+ }
+}
+
extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
- return ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, true, false);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
+ ArtMethod* caller = caller_and_outer.caller;
+ mirror::Class* result =
+ ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, true, false);
+ if (LIKELY(result != nullptr)) {
+ BssWriteBarrier(caller_and_outer.outer_method);
+ }
+ return result;
}
extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
- return ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, false);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
+ ArtMethod* caller = caller_and_outer.caller;
+ mirror::Class* result =
+ ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, false);
+ if (LIKELY(result != nullptr)) {
+ BssWriteBarrier(caller_and_outer.outer_method);
+ }
+ return result;
}
extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
@@ -54,36 +88,28 @@ extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
- return ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, true);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
+ ArtMethod* caller = caller_and_outer.caller;
+ mirror::Class* result =
+ ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, true);
+ if (LIKELY(result != nullptr)) {
+ BssWriteBarrier(caller_and_outer.outer_method);
+ }
+ return result;
}
extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(
self,
// TODO: Change art_quick_resolve_string on MIPS and MIPS64 to kSaveEverything.
(kRuntimeISA == kMips || kRuntimeISA == kMips64) ? Runtime::kSaveRefsOnly
: Runtime::kSaveEverything);
+ ArtMethod* caller = caller_and_outer.caller;
mirror::String* result = ResolveStringFromCode(caller, dex::StringIndex(string_idx));
if (LIKELY(result != nullptr)) {
- // For AOT code, we need a write barrier for the class loader that holds
- // the GC roots in the .bss.
- const DexFile* dex_file = caller->GetDexFile();
- if (dex_file != nullptr &&
- dex_file->GetOatDexFile() != nullptr &&
- !dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
- mirror::ClassLoader* class_loader = caller->GetDeclaringClass()->GetClassLoader();
- DCHECK(class_loader != nullptr); // We do not use .bss GC roots for boot image.
- DCHECK(
- !class_loader->GetClassTable()->InsertOatFile(dex_file->GetOatDexFile()->GetOatFile()))
- << "Oat file with .bss GC roots was not registered in class table: "
- << dex_file->GetOatDexFile()->GetOatFile()->GetLocation();
- // Note that we emit the barrier before the compiled code stores the string as GC root.
- // This is OK as there is no suspend point point in between.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
- }
+ BssWriteBarrier(caller_and_outer.outer_method);
}
return result;
}
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index a1c5082c93..4d5d6de41d 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -23,10 +23,9 @@
V(AllocArray, void*, uint32_t, int32_t, ArtMethod*) \
V(AllocArrayResolved, void*, mirror::Class*, int32_t, ArtMethod*) \
V(AllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*) \
- V(AllocObject, void*, uint32_t, ArtMethod*) \
- V(AllocObjectResolved, void*, mirror::Class*, ArtMethod*) \
- V(AllocObjectInitialized, void*, mirror::Class*, ArtMethod*) \
- V(AllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*) \
+ V(AllocObjectResolved, void*, mirror::Class*) \
+ V(AllocObjectInitialized, void*, mirror::Class*) \
+ V(AllocObjectWithChecks, void*, mirror::Class*) \
V(CheckAndAllocArray, void*, uint32_t, int32_t, ArtMethod*) \
V(CheckAndAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*) \
V(AllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t) \
@@ -134,6 +133,7 @@
V(InvokeStaticTrampolineWithAccessCheck, void, uint32_t, void*) \
V(InvokeSuperTrampolineWithAccessCheck, void, uint32_t, void*) \
V(InvokeVirtualTrampolineWithAccessCheck, void, uint32_t, void*) \
+ V(InvokePolymorphic, void, uint32_t, void*) \
\
V(TestSuspend, void, void) \
\
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index a3e5b552b5..eb76fb6b88 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -27,10 +27,12 @@
#include "imtable-inl.h"
#include "interpreter/interpreter.h"
#include "linear_alloc.h"
+#include "method_handles.h"
#include "method_reference.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/method.h"
+#include "mirror/method_handle_impl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "oat_quick_method_header.h"
@@ -39,6 +41,7 @@
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "debugger.h"
+#include "well_known_classes.h"
namespace art {
@@ -2391,4 +2394,121 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
reinterpret_cast<uintptr_t>(method));
}
+// Returns the shorty type character so the caller can determine how to
+// put |result| into the expected registers. The shorty type is statically
+// known, so the compiler could in principle call a different flavor of
+// this code path per shorty type, though that would require a separate
+// entry point for each type.
+extern "C" uintptr_t artInvokePolymorphic(
+ JValue* result,
+ mirror::Object* raw_method_handle,
+ Thread* self,
+ ArtMethod** sp)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
+ DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveRefsAndArgs));
+
+ // Start new JNI local reference state
+ JNIEnvExt* env = self->GetJniEnv();
+ ScopedObjectAccessUnchecked soa(env);
+ ScopedJniEnvLocalRefState env_state(env);
+ const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
+
+ // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
+ ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
+ uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
+ const DexFile::CodeItem* code = caller_method->GetCodeItem();
+ const Instruction* inst = Instruction::At(&code->insns_[dex_pc]);
+ DCHECK(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC ||
+ inst->Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
+ const DexFile* dex_file = caller_method->GetDexFile();
+ const uint32_t proto_idx = inst->VRegH();
+ const char* shorty = dex_file->GetShorty(proto_idx);
+ const size_t shorty_length = strlen(shorty);
+ static const bool kMethodIsStatic = false; // invoke() and invokeExact() are not static.
+ RememberForGcArgumentVisitor gc_visitor(sp, kMethodIsStatic, shorty, shorty_length, &soa);
+ gc_visitor.VisitArguments();
+
+ // Wrap raw_method_handle in a Handle for safety.
+ StackHandleScope<5> hs(self);
+ Handle<mirror::MethodHandleImpl> method_handle(
+ hs.NewHandle(ObjPtr<mirror::MethodHandleImpl>::DownCast(MakeObjPtr(raw_method_handle))));
+ raw_method_handle = nullptr;
+ self->EndAssertNoThreadSuspension(old_cause);
+
+ // Resolve method - it's either MethodHandle.invoke() or MethodHandle.invokeExact().
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::kForceICCECheck>(self,
+ inst->VRegB(),
+ caller_method,
+ kVirtual);
+ DCHECK((resolved_method ==
+ jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact)) ||
+ (resolved_method ==
+ jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invoke)));
+ if (UNLIKELY(method_handle.IsNull())) {
+ ThrowNullPointerExceptionForMethodAccess(resolved_method, InvokeType::kVirtual);
+ return static_cast<uintptr_t>('V');
+ }
+
+ Handle<mirror::Class> caller_class(hs.NewHandle(caller_method->GetDeclaringClass()));
+ Handle<mirror::MethodType> method_type(hs.NewHandle(linker->ResolveMethodType(
+ *dex_file, proto_idx,
+ hs.NewHandle<mirror::DexCache>(caller_class->GetDexCache()),
+ hs.NewHandle<mirror::ClassLoader>(caller_class->GetClassLoader()))));
+ // This implies we couldn't resolve one or more types in this method handle.
+ if (UNLIKELY(method_type.IsNull())) {
+ CHECK(self->IsExceptionPending());
+ return static_cast<uintptr_t>('V');
+ }
+
+ DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst->VRegA());
+ DCHECK_EQ(resolved_method->IsStatic(), kMethodIsStatic);
+
+ // Fix references before constructing the shadow frame.
+ gc_visitor.FixupReferences();
+
+ // Construct shadow frame placing arguments consecutively from |first_arg|.
+ const bool is_range = (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
+ const size_t num_vregs = is_range ? inst->VRegA_4rcc() : inst->VRegA_45cc();
+ const size_t first_arg = 0;
+ ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
+ CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc);
+ ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
+ ScopedStackedShadowFramePusher
+ frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
+ BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
+ kMethodIsStatic,
+ shorty,
+ strlen(shorty),
+ shadow_frame,
+ first_arg);
+ shadow_frame_builder.VisitArguments();
+
+ // Push a transition back into managed code onto the linked list in thread.
+ ManagedStack fragment;
+ self->PushManagedStackFragment(&fragment);
+
+ // Call DoInvokePolymorphic with |is_range| = true, as shadow frame has argument registers in
+ // consecutive order.
+ uint32_t unused_args[Instruction::kMaxVarArgRegs] = {};
+ uint32_t first_callee_arg = first_arg + 1;
+ const bool do_assignability_check = false;
+ if (!DoInvokePolymorphic<true /* is_range */, do_assignability_check>(self,
+ resolved_method,
+ *shadow_frame,
+ method_handle,
+ method_type,
+ unused_args,
+ first_callee_arg,
+ result)) {
+ DCHECK(self->IsExceptionPending());
+ }
+
+ // Pop transition record.
+ self->PopManagedStackFragment(fragment);
+
+ return static_cast<uintptr_t>(shorty[0]);
+}
+
} // namespace art
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 12836602d5..96e17daa08 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -121,10 +121,10 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
sizeof(Thread::tls_ptr_sized_values::active_suspend_barriers));
// Skip across the entrypoints structures.
-
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_start, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_objects, sizeof(void*));
+
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, mterp_current_ibase, sizeof(size_t));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
@@ -156,13 +156,13 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArray, pAllocArrayResolved, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayResolved, pAllocArrayWithAccessCheck,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayWithAccessCheck, pAllocObject, sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObject, pAllocObjectResolved, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayWithAccessCheck, pAllocObjectResolved,
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectResolved, pAllocObjectInitialized,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectInitialized, pAllocObjectWithAccessCheck,
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectInitialized, pAllocObjectWithChecks,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectWithAccessCheck, pCheckAndAllocArray,
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectWithChecks, pCheckAndAllocArray,
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckAndAllocArray, pCheckAndAllocArrayWithAccessCheck,
sizeof(void*));
@@ -286,6 +286,8 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeSuperTrampolineWithAccessCheck,
pInvokeVirtualTrampolineWithAccessCheck, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeVirtualTrampolineWithAccessCheck,
+ pInvokePolymorphic, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokePolymorphic,
pTestSuspend, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pTestSuspend, pDeliverException, sizeof(void*));
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index e1117e6ea3..7b86339663 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2406,16 +2406,29 @@ void ConcurrentCopying::FinishPhase() {
}
}
-bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
+bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+ bool do_atomic_update) {
mirror::Object* from_ref = field->AsMirrorPtr();
+ if (from_ref == nullptr) {
+ return true;
+ }
mirror::Object* to_ref = IsMarked(from_ref);
if (to_ref == nullptr) {
return false;
}
if (from_ref != to_ref) {
- QuasiAtomic::ThreadFenceRelease();
- field->Assign(to_ref);
- QuasiAtomic::ThreadFenceSequentiallyConsistent();
+ if (do_atomic_update) {
+ do {
+ if (field->AsMirrorPtr() != from_ref) {
+ // Concurrently overwritten by a mutator.
+ break;
+ }
+ } while (!field->CasWeakRelaxed(from_ref, to_ref));
+ } else {
+ QuasiAtomic::ThreadFenceRelease();
+ field->Assign(to_ref);
+ QuasiAtomic::ThreadFenceSequentiallyConsistent();
+ }
}
return true;
}
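The do_atomic_update branch above installs the forwarded reference only while the slot still
holds the value the GC read; a mutator that overwrites the slot concurrently wins. A minimal
sketch of that pattern with std::atomic standing in for mirror::HeapReference (ObjRef and
UpdateIfUnchanged are hypothetical, not ART code):

    #include <atomic>
    #include <cstdint>

    using ObjRef = uint32_t;  // stands in for a compressed heap reference

    void UpdateIfUnchanged(std::atomic<ObjRef>* field, ObjRef from_ref, ObjRef to_ref) {
      ObjRef expected = from_ref;
      // Install to_ref only while the slot still holds from_ref, retrying on
      // spurious CAS failures; this mirrors the CasWeakRelaxed loop above.
      while (!field->compare_exchange_weak(expected, to_ref, std::memory_order_relaxed)) {
        if (expected != from_ref) {
          return;  // concurrently overwritten by a mutator: keep the mutator's value
        }
        expected = from_ref;  // spurious failure, try again
      }
    }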
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 5b8a557375..844bb450cc 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -183,7 +183,8 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+ bool do_atomic_update) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 5b513991d1..0177e2a1ad 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -187,7 +187,10 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark
// and will be used for reading system weaks while the GC is running.
virtual mirror::Object* IsMarked(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
+  // Returns true if the given heap reference is null or already marked. If it is already marked,
+  // updates the reference (with a CAS when do_atomic_update is true). Otherwise, returns false.
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Used by reference processor.
virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index ddcb6c0698..85e6783599 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -472,9 +472,15 @@ mirror::Object* MarkCompact::IsMarked(mirror::Object* object) {
return mark_bitmap_->Test(object) ? object : nullptr;
}
-bool MarkCompact::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr) {
+bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr,
+ // MarkCompact does the GC in a pause. No CAS needed.
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
// Side effect free since we call this before ever moving objects.
- return IsMarked(ref_ptr->AsMirrorPtr()) != nullptr;
+ mirror::Object* obj = ref_ptr->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
+ return IsMarked(obj) != nullptr;
}
void MarkCompact::SweepSystemWeaks() {
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 564f85b3f8..6d52d5d515 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -175,7 +175,8 @@ class MarkCompact : public GarbageCollector {
virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update) OVERRIDE
REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_,
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 06ed0290a9..f00da73458 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -390,8 +390,13 @@ inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
}
}
-bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
- return IsMarked(ref->AsMirrorPtr());
+bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
+ mirror::Object* obj = ref->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
+ return IsMarked(obj);
}
class MarkSweep::MarkObjectSlowPath {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 02cf462bd3..a6e2d61f6d 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -188,7 +188,8 @@ class MarkSweep : public GarbageCollector {
void VerifyIsLive(const mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index f2aa5a7599..cb9e7e2c15 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -765,8 +765,13 @@ mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
return mark_bitmap_->Test(obj) ? obj : nullptr;
}
-bool SemiSpace::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) {
+bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+ // SemiSpace does the GC in a pause. No CAS needed.
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
mirror::Object* obj = object->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
mirror::Object* new_obj = IsMarked(obj);
if (new_obj == nullptr) {
return false;
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 4cebcc3044..52b5e5fe30 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -166,7 +166,8 @@ class SemiSpace : public GarbageCollector {
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+ bool do_atomic_update) OVERRIDE
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 081be968eb..c1548365c7 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -203,7 +203,9 @@ void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
DCHECK(klass != nullptr);
DCHECK(klass->IsTypeOfReferenceClass());
mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
- if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) {
+ // do_atomic_update needs to be true because this happens outside of the reference processing
+ // phase.
+ if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
Thread* self = Thread::Current();
// TODO: Remove these locks, and use atomic stacks for storing references?
// We need to check that the references haven't already been enqueued since we can end up
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index a0eb197bd5..734caea371 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -129,8 +129,9 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
while (!IsEmpty()) {
ObjPtr<mirror::Reference> ref = DequeuePendingReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr &&
- !collector->IsMarkedHeapReference(referent_addr)) {
+ // do_atomic_update is false because this happens during the reference processing phase where
+ // Reference.clear() would block.
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
// Referent is white, clear it.
if (Runtime::Current()->IsActiveTransaction()) {
ref->ClearReferent<true>();
@@ -147,8 +148,9 @@ void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_referenc
while (!IsEmpty()) {
ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr &&
- !collector->IsMarkedHeapReference(referent_addr)) {
+ // do_atomic_update is false because this happens during the reference processing phase where
+ // Reference.clear() would block.
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index b0d7fb247a..d7dfcd4408 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -508,9 +508,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
} else {
- obj = AllocObjectFromCode<do_access_check, true>(
- dex::TypeIndex(inst->VRegB_21c()),
- shadow_frame.GetMethod(),
+ obj = AllocObjectFromCode<true>(
+ c.Ptr(),
self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index c8c1563ff6..369c2614a7 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -375,10 +375,9 @@ extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
} else {
- obj = AllocObjectFromCode<false, true>(dex::TypeIndex(inst->VRegB_21c()),
- shadow_frame->GetMethod(),
- self,
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ obj = AllocObjectFromCode<true>(c,
+ self,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
}
if (UNLIKELY(obj == nullptr)) {
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index f80c43d80c..e0f28adc4f 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -566,7 +566,10 @@ jweak JavaVMExt::AddWeakGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) {
return nullptr;
}
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
- while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+ // CMS needs this to block for concurrent reference processing because an object allocated during
+ // the GC won't be marked and concurrent reference processing would incorrectly clear the JNI weak
+ // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
+ while (!kUseReadBarrier && UNLIKELY(!MayAccessWeakGlobals(self))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpoint();
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
index e70b93607e..22fb83cb5c 100644
--- a/runtime/mirror/object_reference-inl.h
+++ b/runtime/mirror/object_reference-inl.h
@@ -34,6 +34,15 @@ HeapReference<MirrorType> HeapReference<MirrorType>::FromObjPtr(ObjPtr<MirrorTyp
return HeapReference<MirrorType>(ptr.Ptr());
}
+template<class MirrorType>
+bool HeapReference<MirrorType>::CasWeakRelaxed(MirrorType* expected_ptr, MirrorType* new_ptr) {
+ HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_ptr));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_ptr));
+ Atomic<uint32_t>* atomic_reference = reinterpret_cast<Atomic<uint32_t>*>(&this->reference_);
+ return atomic_reference->CompareExchangeWeakRelaxed(expected_ref.reference_,
+ new_ref.reference_);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 71f34c66e2..a96a120d68 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -94,6 +94,9 @@ class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, Mirr
static HeapReference<MirrorType> FromObjPtr(ObjPtr<MirrorType> ptr)
REQUIRES_SHARED(Locks::mutator_lock_);
+ bool CasWeakRelaxed(MirrorType* old_ptr, MirrorType* new_ptr)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
explicit HeapReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 893abd5462..9c0927584e 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1361,8 +1361,10 @@ void MonitorList::BroadcastForNewMonitors() {
void MonitorList::Add(Monitor* m) {
Thread* self = Thread::Current();
MutexLock mu(self, monitor_list_lock_);
- while (UNLIKELY((!kUseReadBarrier && !allow_new_monitors_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ // CMS needs this to block for concurrent reference processing because an object allocated during
+ // the GC won't be marked, so concurrent monitor sweeping would incorrectly sweep its monitor.
+ // But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
+ while (!kUseReadBarrier && UNLIKELY(!allow_new_monitors_)) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpoint();
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 36825cb870..268d71ac65 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -17,6 +17,7 @@
#include "dalvik_system_VMStack.h"
#include "art_method-inl.h"
+#include "gc/task_processor.h"
#include "jni_internal.h"
#include "nth_caller_visitor.h"
#include "mirror/class-inl.h"
@@ -31,9 +32,18 @@ namespace art {
static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject peer)
REQUIRES_SHARED(Locks::mutator_lock_) {
jobject trace = nullptr;
- if (soa.Decode<mirror::Object>(peer) == soa.Self()->GetPeer()) {
+ ObjPtr<mirror::Object> decoded_peer = soa.Decode<mirror::Object>(peer);
+ if (decoded_peer == soa.Self()->GetPeer()) {
trace = soa.Self()->CreateInternalStackTrace<false>(soa);
} else {
+ // Never allow suspending the heap task thread since it may deadlock if allocations are
+ // required for the stack trace.
+ Thread* heap_task_thread =
+ Runtime::Current()->GetHeap()->GetTaskProcessor()->GetRunningThread();
+ // heap_task_thread could be null if the daemons aren't yet started.
+ if (heap_task_thread != nullptr && decoded_peer == heap_task_thread->GetPeer()) {
+ return nullptr;
+ }
// Suspend thread to build stack trace.
ScopedThreadSuspension sts(soa.Self(), kNative);
ThreadList* thread_list = Runtime::Current()->GetThreadList();
diff --git a/runtime/oat.h b/runtime/oat.h
index 1fd906dc1b..f2d457c2ee 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '9', '4', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '9', '7', '\0' }; // HLoadClass/kBssEntry change
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 38df427ed1..d47f1b5611 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -323,8 +323,10 @@ bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) {
}
PointerSize pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
- uint8_t* dex_cache_arrays = bss_begin_;
- uint8_t* dex_cache_arrays_end = (bss_roots_ != nullptr) ? bss_roots_ : bss_end_;
+ uint8_t* dex_cache_arrays = (bss_begin_ == bss_roots_) ? nullptr : bss_begin_;
+ uint8_t* dex_cache_arrays_end =
+ (bss_begin_ == bss_roots_) ? nullptr : (bss_roots_ != nullptr) ? bss_roots_ : bss_end_;
+ DCHECK_EQ(dex_cache_arrays != nullptr, dex_cache_arrays_end != nullptr);
uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
oat_dex_files_storage_.reserve(dex_file_count);
for (size_t i = 0; i < dex_file_count; i++) {
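
The new carving of the .bss section reads as: dex-cache arrays occupy [bss_begin_, bss_roots_) with GC roots following, so when bss_begin_ == bss_roots_ there are no dex-cache arrays at all. A small hedged sketch of that computation (standalone helper, not an ART API):

#include <cstdint>
#include <utility>

// Returns the [begin, end) range holding dex-cache arrays, or {nullptr, nullptr}
// when the .bss starts directly with GC roots (i.e. there are no such arrays).
std::pair<uint8_t*, uint8_t*> DexCacheArrayRange(uint8_t* bss_begin,
                                                 uint8_t* bss_roots,
                                                 uint8_t* bss_end) {
  if (bss_begin == bss_roots) {
    return {nullptr, nullptr};
  }
  uint8_t* end = (bss_roots != nullptr) ? bss_roots : bss_end;
  return {bss_begin, end};
}
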
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index b757b2114f..4bd21b4c2f 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -29,6 +29,7 @@ cc_defaults {
"ti_properties.cc",
"ti_stack.cc",
"ti_redefine.cc",
+ "ti_thread.cc",
"transform.cc"],
include_dirs: ["art/runtime"],
shared_libs: [
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index c52dd76b59..8674e6cae7 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -55,6 +55,7 @@
#include "ti_properties.h"
#include "ti_redefine.h"
#include "ti_stack.h"
+#include "ti_thread.h"
#include "transform.h"
// TODO Remove this at some point by annotating all the methods. It was put in to make the skeleton
@@ -117,15 +118,15 @@ class JvmtiFunctions {
}
static jvmtiError GetThreadState(jvmtiEnv* env, jthread thread, jint* thread_state_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::GetThreadState(env, thread, thread_state_ptr);
}
static jvmtiError GetCurrentThread(jvmtiEnv* env, jthread* thread_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::GetCurrentThread(env, thread_ptr);
}
static jvmtiError GetAllThreads(jvmtiEnv* env, jint* threads_count_ptr, jthread** threads_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::GetAllThreads(env, threads_count_ptr, threads_ptr);
}
static jvmtiError SuspendThread(jvmtiEnv* env, jthread thread) {
@@ -159,7 +160,7 @@ class JvmtiFunctions {
}
static jvmtiError GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::GetThreadInfo(env, thread, info_ptr);
}
static jvmtiError GetOwnedMonitorInfo(jvmtiEnv* env,
@@ -237,7 +238,7 @@ class JvmtiFunctions {
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr,
jint* thread_count_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::GetAllStackTraces(env, max_frame_count, stack_info_ptr, thread_count_ptr);
}
static jvmtiError GetThreadListStackTraces(jvmtiEnv* env,
@@ -245,11 +246,15 @@ class JvmtiFunctions {
const jthread* thread_list,
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::GetThreadListStackTraces(env,
+ thread_count,
+ thread_list,
+ max_frame_count,
+ stack_info_ptr);
}
static jvmtiError GetFrameCount(jvmtiEnv* env, jthread thread, jint* count_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::GetFrameCount(env, thread, count_ptr);
}
static jvmtiError PopFrame(jvmtiEnv* env, jthread thread) {
@@ -261,7 +266,7 @@ class JvmtiFunctions {
jint depth,
jmethodID* method_ptr,
jlocation* location_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::GetFrameLocation(env, thread, depth, method_ptr, location_ptr);
}
static jvmtiError NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
@@ -621,7 +626,17 @@ class JvmtiFunctions {
static jvmtiError RedefineClasses(jvmtiEnv* env,
jint class_count,
const jvmtiClassDefinition* class_definitions) {
- return ERR(NOT_IMPLEMENTED);
+ std::string error_msg;
+ jvmtiError res = Redefiner::RedefineClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
+ art::Runtime::Current(),
+ art::Thread::Current(),
+ class_count,
+ class_definitions,
+ &error_msg);
+ if (res != OK) {
+ LOG(WARNING) << "FAILURE TO REDEFINE " << error_msg;
+ }
+ return res;
}
static jvmtiError GetObjectSize(jvmtiEnv* env, jobject object, jlong* size_ptr) {
@@ -1169,34 +1184,6 @@ class JvmtiFunctions {
return RetransformClassesWithHook(reinterpret_cast<ArtJvmTiEnv*>(env), classes, hook);
}
- static jvmtiError RedefineClassDirect(ArtJvmTiEnv* env,
- jclass klass,
- jint dex_size,
- unsigned char* dex_file) {
- if (!IsValidEnv(env)) {
- return ERR(INVALID_ENVIRONMENT);
- }
- jvmtiError ret = OK;
- std::string location;
- if ((ret = GetClassLocation(env, klass, &location)) != OK) {
- // TODO Do something more here? Maybe give log statements?
- return ret;
- }
- std::string error;
- ret = Redefiner::RedefineClass(env,
- art::Runtime::Current(),
- art::Thread::Current(),
- klass,
- location,
- dex_size,
- reinterpret_cast<uint8_t*>(dex_file),
- &error);
- if (ret != OK) {
- LOG(WARNING) << "FAILURE TO REDEFINE " << error;
- }
- return ret;
- }
-
// TODO This will be called by the event handler for the art::ti Event Load Event
static jvmtiError RetransformClassesWithHook(ArtJvmTiEnv* env,
const std::vector<jclass>& classes,
@@ -1241,14 +1228,13 @@ class JvmtiFunctions {
/*out*/&new_dex_data);
// Check if anything actually changed.
if ((new_data_len != 0 || new_dex_data != nullptr) && new_dex_data != dex_data) {
- res = Redefiner::RedefineClass(env,
- art::Runtime::Current(),
- art::Thread::Current(),
- klass,
- location,
- new_data_len,
- new_dex_data,
- &error);
+ jvmtiClassDefinition def = { klass, new_data_len, new_dex_data };
+ res = Redefiner::RedefineClasses(env,
+ art::Runtime::Current(),
+ art::Thread::Current(),
+ 1,
+ &def,
+ &error);
env->Deallocate(new_dex_data);
}
// Deallocate the old dex data.
@@ -1307,10 +1293,7 @@ const jvmtiInterface_1 gJvmtiInterface = {
reinterpret_cast<void*>(JvmtiFunctions::RetransformClassWithHook),
// nullptr, // reserved1
JvmtiFunctions::SetEventNotificationMode,
- // SPECIAL FUNCTION: RedefineClassDirect Is normally reserved3
- // TODO Remove once we have events working.
- reinterpret_cast<void*>(JvmtiFunctions::RedefineClassDirect),
- // nullptr, // reserved3
+ nullptr, // reserved3
JvmtiFunctions::GetAllThreads,
JvmtiFunctions::SuspendThread,
JvmtiFunctions::ResumeThread,
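
With GetThreadState, GetAllThreads, the stack-trace entry points and RedefineClasses now wired into the function table, an agent drives redefinition through the standard JVMTI interface instead of the removed RedefineClassDirect hook. A caller-side example; the dex byte buffer and its length are placeholders for data produced elsewhere:

#include <jvmti.h>

// Redefine a single class from an agent. On ART this call now reaches
// Redefiner::RedefineClasses via the table entry installed above.
jvmtiError RedefineOneClass(jvmtiEnv* jvmti,
                            jclass klass,
                            const unsigned char* dex_bytes,
                            jint dex_len) {
  jvmtiClassDefinition def;
  def.klass = klass;
  def.class_byte_count = dex_len;
  def.class_bytes = dex_bytes;
  return jvmti->RedefineClasses(1, &def);
}
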
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index 5bf844564c..14477a1db9 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -53,6 +53,7 @@
#include "object_lock.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
+#include "transform.h"
namespace openjdkjvmti {
@@ -207,7 +208,7 @@ jvmtiError Redefiner::GetClassRedefinitionError(art::Handle<art::mirror::Class>
// Moves dex data to an anonymous, read-only mmap'd region.
std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location,
jint data_len,
- unsigned char* dex_data,
+ const unsigned char* dex_data,
std::string* error_msg) {
std::unique_ptr<art::MemMap> map(art::MemMap::MapAnonymous(
StringPrintf("%s-transformed", original_location.c_str()).c_str(),
@@ -227,34 +228,87 @@ std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& orig
return map;
}
+Redefiner::ClassRedefinition::ClassRedefinition(Redefiner* driver,
+ jclass klass,
+ const art::DexFile* redefined_dex_file,
+ const char* class_sig) :
+ driver_(driver), klass_(klass), dex_file_(redefined_dex_file), class_sig_(class_sig) {
+ GetMirrorClass()->MonitorEnter(driver_->self_);
+}
+
+Redefiner::ClassRedefinition::~ClassRedefinition() {
+ if (driver_ != nullptr) {
+ GetMirrorClass()->MonitorExit(driver_->self_);
+ }
+}
+
// TODO This should handle doing multiple classes at once so we need to do less cleanup when things
// go wrong.
-jvmtiError Redefiner::RedefineClass(ArtJvmTiEnv* env,
- art::Runtime* runtime,
- art::Thread* self,
- jclass klass,
- const std::string& original_dex_location,
- jint data_len,
- unsigned char* dex_data,
- std::string* error_msg) {
+jvmtiError Redefiner::RedefineClasses(ArtJvmTiEnv* env,
+ art::Runtime* runtime,
+ art::Thread* self,
+ jint class_count,
+ const jvmtiClassDefinition* definitions,
+ std::string* error_msg) {
+ if (env == nullptr) {
+ *error_msg = "env was null!";
+ return ERR(INVALID_ENVIRONMENT);
+ } else if (class_count < 0) {
+ *error_msg = "class_count was less then 0";
+ return ERR(ILLEGAL_ARGUMENT);
+ } else if (class_count == 0) {
+ // We don't actually need to do anything. Just return OK.
+ return OK;
+ } else if (definitions == nullptr) {
+ *error_msg = "null definitions!";
+ return ERR(NULL_POINTER);
+ }
+ // Stop JIT for the duration of this redefine since the JIT might concurrently compile a method we
+ // are going to redefine.
+ art::jit::ScopedJitSuspend suspend_jit;
+ // Get shared mutator lock so we can lock all the classes.
+ art::ScopedObjectAccess soa(self);
+ std::vector<Redefiner::ClassRedefinition> redefinitions;
+ redefinitions.reserve(class_count);
+ Redefiner r(runtime, self, error_msg);
+ for (jint i = 0; i < class_count; i++) {
+ jvmtiError res = r.AddRedefinition(env, definitions[i]);
+ if (res != OK) {
+ return res;
+ }
+ }
+ return r.Run();
+}
+
+jvmtiError Redefiner::AddRedefinition(ArtJvmTiEnv* env, const jvmtiClassDefinition& def) {
+ std::string original_dex_location;
+ jvmtiError ret = OK;
+ if ((ret = GetClassLocation(env, def.klass, &original_dex_location))) {
+ *error_msg_ = "Unable to get original dex file location!";
+ return ret;
+ }
std::unique_ptr<art::MemMap> map(MoveDataToMemMap(original_dex_location,
- data_len,
- dex_data,
- error_msg));
+ def.class_byte_count,
+ def.class_bytes,
+ error_msg_));
std::ostringstream os;
char* generic_ptr_unused = nullptr;
char* signature_ptr = nullptr;
- if (env->GetClassSignature(klass, &signature_ptr, &generic_ptr_unused) != OK) {
- signature_ptr = const_cast<char*>("<UNKNOWN CLASS>");
+ if (env->GetClassSignature(def.klass, &signature_ptr, &generic_ptr_unused) != OK) {
+ *error_msg_ = "A jclass passed in does not seem to be valid";
+ return ERR(INVALID_CLASS);
}
+ // These will make sure we deallocate the signature.
+ JvmtiUniquePtr sig_unique_ptr(MakeJvmtiUniquePtr(env, signature_ptr));
+ JvmtiUniquePtr generic_unique_ptr(MakeJvmtiUniquePtr(env, generic_ptr_unused));
if (map.get() == nullptr) {
os << "Failed to create anonymous mmap for modified dex file of class " << signature_ptr
- << "in dex file " << original_dex_location << " because: " << *error_msg;
- *error_msg = os.str();
+ << "in dex file " << original_dex_location << " because: " << *error_msg_;
+ *error_msg_ = os.str();
return ERR(OUT_OF_MEMORY);
}
if (map->Size() < sizeof(art::DexFile::Header)) {
- *error_msg = "Could not read dex file header because dex_data was too short";
+ *error_msg_ = "Could not read dex file header because dex_data was too short";
return ERR(INVALID_CLASS_FORMAT);
}
uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
@@ -263,28 +317,21 @@ jvmtiError Redefiner::RedefineClass(ArtJvmTiEnv* env,
std::move(map),
/*verify*/true,
/*verify_checksum*/true,
- error_msg));
+ error_msg_));
if (dex_file.get() == nullptr) {
- os << "Unable to load modified dex file for " << signature_ptr << ": " << *error_msg;
- *error_msg = os.str();
+ os << "Unable to load modified dex file for " << signature_ptr << ": " << *error_msg_;
+ *error_msg_ = os.str();
return ERR(INVALID_CLASS_FORMAT);
}
- // Stop JIT for the duration of this redefine since the JIT might concurrently compile a method we
- // are going to redefine.
- art::jit::ScopedJitSuspend suspend_jit;
- // Get shared mutator lock.
- art::ScopedObjectAccess soa(self);
- art::StackHandleScope<1> hs(self);
- Redefiner r(runtime, self, klass, signature_ptr, dex_file, error_msg);
- // Lock around this class to avoid races.
- art::ObjectLock<art::mirror::Class> lock(self, hs.NewHandle(r.GetMirrorClass()));
- return r.Run();
+ redefinitions_.push_back(
+ Redefiner::ClassRedefinition(this, def.klass, dex_file.release(), signature_ptr));
+ return OK;
}
// TODO *MAJOR* This should return the actual source java.lang.DexFile object for the klass.
// TODO Make mirror of DexFile and associated types to make this less hellish.
// TODO Make mirror of BaseDexClassLoader and associated types to make this less hellish.
-art::mirror::Object* Redefiner::FindSourceDexFileObject(
+art::mirror::Object* Redefiner::ClassRedefinition::FindSourceDexFileObject(
art::Handle<art::mirror::ClassLoader> loader) {
const char* dex_path_list_element_array_name = "[Ldalvik/system/DexPathList$Element;";
const char* dex_path_list_element_name = "Ldalvik/system/DexPathList$Element;";
@@ -292,14 +339,14 @@ art::mirror::Object* Redefiner::FindSourceDexFileObject(
const char* dex_path_list_name = "Ldalvik/system/DexPathList;";
const char* dex_class_loader_name = "Ldalvik/system/BaseDexClassLoader;";
- CHECK(!self_->IsExceptionPending());
- art::StackHandleScope<11> hs(self_);
- art::ClassLinker* class_linker = runtime_->GetClassLinker();
+ CHECK(!driver_->self_->IsExceptionPending());
+ art::StackHandleScope<11> hs(driver_->self_);
+ art::ClassLinker* class_linker = driver_->runtime_->GetClassLinker();
art::Handle<art::mirror::ClassLoader> null_loader(hs.NewHandle<art::mirror::ClassLoader>(
nullptr));
art::Handle<art::mirror::Class> base_dex_loader_class(hs.NewHandle(class_linker->FindClass(
- self_, dex_class_loader_name, null_loader)));
+ driver_->self_, dex_class_loader_name, null_loader)));
// Get all the ArtFields so we can look in the BaseDexClassLoader
art::ArtField* path_list_field = base_dex_loader_class->FindDeclaredInstanceField(
@@ -307,12 +354,12 @@ art::mirror::Object* Redefiner::FindSourceDexFileObject(
CHECK(path_list_field != nullptr);
art::ArtField* dex_path_list_element_field =
- class_linker->FindClass(self_, dex_path_list_name, null_loader)
+ class_linker->FindClass(driver_->self_, dex_path_list_name, null_loader)
->FindDeclaredInstanceField("dexElements", dex_path_list_element_array_name);
CHECK(dex_path_list_element_field != nullptr);
art::ArtField* element_dex_file_field =
- class_linker->FindClass(self_, dex_path_list_element_name, null_loader)
+ class_linker->FindClass(driver_->self_, dex_path_list_element_name, null_loader)
->FindDeclaredInstanceField("dexFile", dex_file_name);
CHECK(element_dex_file_field != nullptr);
@@ -327,11 +374,11 @@ art::mirror::Object* Redefiner::FindSourceDexFileObject(
art::Handle<art::mirror::Object> path_list(
hs.NewHandle(path_list_field->GetObject(loader.Get())));
CHECK(path_list.Get() != nullptr);
- CHECK(!self_->IsExceptionPending());
+ CHECK(!driver_->self_->IsExceptionPending());
art::Handle<art::mirror::ObjectArray<art::mirror::Object>> dex_elements_list(hs.NewHandle(
dex_path_list_element_field->GetObject(path_list.Get())->
AsObjectArray<art::mirror::Object>()));
- CHECK(!self_->IsExceptionPending());
+ CHECK(!driver_->self_->IsExceptionPending());
CHECK(dex_elements_list.Get() != nullptr);
size_t num_elements = dex_elements_list->GetLength();
art::MutableHandle<art::mirror::Object> current_element(
@@ -343,9 +390,9 @@ art::mirror::Object* Redefiner::FindSourceDexFileObject(
for (size_t i = 0; i < num_elements; i++) {
current_element.Assign(dex_elements_list->Get(i));
CHECK(current_element.Get() != nullptr);
- CHECK(!self_->IsExceptionPending());
+ CHECK(!driver_->self_->IsExceptionPending());
CHECK(dex_elements_list.Get() != nullptr);
- CHECK_EQ(current_element->GetClass(), class_linker->FindClass(self_,
+ CHECK_EQ(current_element->GetClass(), class_linker->FindClass(driver_->self_,
dex_path_list_element_name,
null_loader));
// TODO It would be cleaner to put the art::DexFile into the dalvik.system.DexFile the class
@@ -360,22 +407,23 @@ art::mirror::Object* Redefiner::FindSourceDexFileObject(
return nullptr;
}
-art::mirror::Class* Redefiner::GetMirrorClass() {
- return self_->DecodeJObject(klass_)->AsClass();
+art::mirror::Class* Redefiner::ClassRedefinition::GetMirrorClass() {
+ return driver_->self_->DecodeJObject(klass_)->AsClass();
}
-art::mirror::ClassLoader* Redefiner::GetClassLoader() {
+art::mirror::ClassLoader* Redefiner::ClassRedefinition::GetClassLoader() {
return GetMirrorClass()->GetClassLoader();
}
-art::mirror::DexCache* Redefiner::CreateNewDexCache(art::Handle<art::mirror::ClassLoader> loader) {
- return runtime_->GetClassLinker()->RegisterDexFile(*dex_file_, loader.Get());
+art::mirror::DexCache* Redefiner::ClassRedefinition::CreateNewDexCache(
+ art::Handle<art::mirror::ClassLoader> loader) {
+ return driver_->runtime_->GetClassLinker()->RegisterDexFile(*dex_file_, loader.Get());
}
// TODO Really wishing I had that mirror of java.lang.DexFile now.
-art::mirror::LongArray* Redefiner::AllocateDexFileCookie(
+art::mirror::LongArray* Redefiner::ClassRedefinition::AllocateDexFileCookie(
art::Handle<art::mirror::Object> java_dex_file_obj) {
- art::StackHandleScope<2> hs(self_);
+ art::StackHandleScope<2> hs(driver_->self_);
// mCookie is nulled out if the DexFile has been closed but mInternalCookie sticks around until
// the object is finalized. Since they always point to the same array if mCookie is not null we
// just use the mInternalCookie field. We will update one or both of these fields later.
@@ -390,9 +438,9 @@ art::mirror::LongArray* Redefiner::AllocateDexFileCookie(
CHECK(cookie.Get() != nullptr);
CHECK_GE(cookie->GetLength(), 1);
art::Handle<art::mirror::LongArray> new_cookie(
- hs.NewHandle(art::mirror::LongArray::Alloc(self_, cookie->GetLength() + 1)));
+ hs.NewHandle(art::mirror::LongArray::Alloc(driver_->self_, cookie->GetLength() + 1)));
if (new_cookie.Get() == nullptr) {
- self_->AssertPendingOOMException();
+ driver_->self_->AssertPendingOOMException();
return nullptr;
}
// Copy the oat-dex field at the start.
@@ -405,19 +453,21 @@ art::mirror::LongArray* Redefiner::AllocateDexFileCookie(
return new_cookie.Get();
}
-void Redefiner::RecordFailure(jvmtiError result, const std::string& error_msg) {
+void Redefiner::RecordFailure(jvmtiError result,
+ const std::string& class_sig,
+ const std::string& error_msg) {
*error_msg_ = StringPrintf("Unable to perform redefinition of '%s': %s",
- class_sig_,
+ class_sig.c_str(),
error_msg.c_str());
result_ = result;
}
-bool Redefiner::FinishRemainingAllocations(
+bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
/*out*/art::MutableHandle<art::mirror::ClassLoader>* source_class_loader,
/*out*/art::MutableHandle<art::mirror::Object>* java_dex_file_obj,
/*out*/art::MutableHandle<art::mirror::LongArray>* new_dex_file_cookie,
/*out*/art::MutableHandle<art::mirror::DexCache>* new_dex_cache) {
- art::StackHandleScope<4> hs(self_);
+ art::StackHandleScope<4> hs(driver_->self_);
// This shouldn't allocate
art::Handle<art::mirror::ClassLoader> loader(hs.NewHandle(GetClassLoader()));
if (loader.Get() == nullptr) {
@@ -433,15 +483,15 @@ bool Redefiner::FinishRemainingAllocations(
}
art::Handle<art::mirror::LongArray> new_cookie(hs.NewHandle(AllocateDexFileCookie(dex_file_obj)));
if (new_cookie.Get() == nullptr) {
- self_->AssertPendingOOMException();
- self_->ClearException();
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
return false;
}
art::Handle<art::mirror::DexCache> dex_cache(hs.NewHandle(CreateNewDexCache(loader)));
if (dex_cache.Get() == nullptr) {
- self_->AssertPendingOOMException();
- self_->ClearException();
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate DexCache");
return false;
}
@@ -453,13 +503,11 @@ bool Redefiner::FinishRemainingAllocations(
}
struct CallbackCtx {
- Redefiner* const r;
art::LinearAlloc* allocator;
std::unordered_map<art::ArtMethod*, art::ArtMethod*> obsolete_map;
std::unordered_set<art::ArtMethod*> obsolete_methods;
- CallbackCtx(Redefiner* self, art::LinearAlloc* alloc)
- : r(self), allocator(alloc) {}
+ explicit CallbackCtx(art::LinearAlloc* alloc) : allocator(alloc) {}
};
void DoAllocateObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SAFETY_ANALYSIS {
@@ -472,11 +520,12 @@ void DoAllocateObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SA
// This creates any ArtMethod* structures needed for obsolete methods and ensures that the stack is
// updated so they will be run.
-void Redefiner::FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) {
+// TODO Rewrite so we can do this only once regardless of how many redefinitions there are.
+void Redefiner::ClassRedefinition::FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) {
art::ScopedAssertNoThreadSuspension ns("No thread suspension during thread stack walking");
art::mirror::ClassExt* ext = art_klass->GetExtData();
CHECK(ext->GetObsoleteMethods() != nullptr);
- CallbackCtx ctx(this, art_klass->GetClassLoader()->GetAllocator());
+ CallbackCtx ctx(art_klass->GetClassLoader()->GetAllocator());
// Add all the declared methods to the map
for (auto& m : art_klass->GetDeclaredMethods(art::kRuntimePointerSize)) {
ctx.obsolete_methods.insert(&m);
@@ -484,7 +533,7 @@ void Redefiner::FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) {
DCHECK(!m.IsIntrinsic());
}
{
- art::MutexLock mu(self_, *art::Locks::thread_list_lock_);
+ art::MutexLock mu(driver_->self_, *art::Locks::thread_list_lock_);
art::ThreadList* list = art::Runtime::Current()->GetThreadList();
list->ForEach(DoAllocateObsoleteMethodsCallback, static_cast<void*>(&ctx));
}
@@ -493,7 +542,7 @@ void Redefiner::FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) {
// Fills the obsolete method map in the art_klass's extData. This is so obsolete methods are able to
// figure out their DexCaches.
-void Redefiner::FillObsoleteMethodMap(
+void Redefiner::ClassRedefinition::FillObsoleteMethodMap(
art::mirror::Class* art_klass,
const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes) {
int32_t index = 0;
@@ -532,9 +581,9 @@ void Redefiner::EnsureObsoleteMethodsAreDeoptimized() {
i->ReJitEverything("libOpenJkdJvmti - Class Redefinition");
}
-bool Redefiner::CheckClass() {
+bool Redefiner::ClassRedefinition::CheckClass() {
// TODO Might just want to put it in a ObjPtr and NoSuspend assert.
- art::StackHandleScope<1> hs(self_);
+ art::StackHandleScope<1> hs(driver_->self_);
// Easy check that only 1 class def is present.
if (dex_file_->NumClassDefs() != 1) {
RecordFailure(ERR(ILLEGAL_ARGUMENT),
@@ -607,14 +656,15 @@ bool Redefiner::CheckClass() {
}
}
LOG(WARNING) << "No verification is done on annotations of redefined classes.";
+ LOG(WARNING) << "Bytecodes of redefinitions are not verified.";
return true;
}
// TODO Move this to use IsRedefinable when that function is made.
-bool Redefiner::CheckRedefinable() {
+bool Redefiner::ClassRedefinition::CheckRedefinable() {
std::string err;
- art::StackHandleScope<1> hs(self_);
+ art::StackHandleScope<1> hs(driver_->self_);
art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
jvmtiError res = Redefiner::GetClassRedefinitionError(h_klass, &err);
@@ -626,46 +676,200 @@ bool Redefiner::CheckRedefinable() {
}
}
-bool Redefiner::CheckRedefinitionIsValid() {
+bool Redefiner::ClassRedefinition::CheckRedefinitionIsValid() {
return CheckRedefinable() &&
CheckClass() &&
CheckSameFields() &&
CheckSameMethods();
}
-jvmtiError Redefiner::Run() {
- art::StackHandleScope<5> hs(self_);
- // TODO We might want to have a global lock (or one based on the class being redefined at least)
- // in order to make cleanup easier. Not a huge deal though.
- //
- // First we just allocate the ClassExt and its fields that we need. These can be updated
- // atomically without any issues (since we allocate the map arrays as empty) so we don't bother
- // doing a try loop. The other allocations we need to ensure that nothing has changed in the time
- // between allocating them and pausing all threads before we can update them so we need to do a
- // try loop.
- if (!CheckRedefinitionIsValid() || !EnsureClassAllocationsFinished()) {
- return result_;
+// A wrapper that lets us hold onto the arbitrarily sized data needed for redefinitions in a
+// reasonably sane way. This adds no fields to the normal ObjectArray. By doing this we can avoid
+// having to deal with the fact that we need to hold an arbitrary number of references live.
+class RedefinitionDataHolder {
+ public:
+ enum DataSlot : int32_t {
+ kSlotSourceClassLoader = 0,
+ kSlotJavaDexFile = 1,
+ kSlotNewDexFileCookie = 2,
+ kSlotNewDexCache = 3,
+ kSlotMirrorClass = 4,
+
+ // Must be last one.
+ kNumSlots = 5,
+ };
+
+ // This needs to have a HandleScope passed in that is capable of creating a new Handle without
+ // overflowing. Only one handle will be created. This object has a lifetime identical to that of
+ // the passed in handle-scope.
+ RedefinitionDataHolder(art::StackHandleScope<1>* hs,
+ art::Runtime* runtime,
+ art::Thread* self,
+ int32_t num_redefinitions) REQUIRES_SHARED(art::Locks::mutator_lock_) :
+ arr_(
+ hs->NewHandle(
+ art::mirror::ObjectArray<art::mirror::Object>::Alloc(
+ self,
+ runtime->GetClassLinker()->GetClassRoot(art::ClassLinker::kObjectArrayClass),
+ num_redefinitions * kNumSlots))) {}
+
+ bool IsNull() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return arr_.IsNull();
+ }
+
+ // TODO Maybe make an iterable view type to simplify using this.
+ art::mirror::ClassLoader* GetSourceClassLoader(jint klass_index)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::down_cast<art::mirror::ClassLoader*>(GetSlot(klass_index, kSlotSourceClassLoader));
+ }
+ art::mirror::Object* GetJavaDexFile(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return GetSlot(klass_index, kSlotJavaDexFile);
+ }
+ art::mirror::LongArray* GetNewDexFileCookie(jint klass_index)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::down_cast<art::mirror::LongArray*>(GetSlot(klass_index, kSlotNewDexFileCookie));
+ }
+ art::mirror::DexCache* GetNewDexCache(jint klass_index)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::down_cast<art::mirror::DexCache*>(GetSlot(klass_index, kSlotNewDexCache));
+ }
+ art::mirror::Class* GetMirrorClass(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::down_cast<art::mirror::Class*>(GetSlot(klass_index, kSlotMirrorClass));
+ }
+
+ void SetSourceClassLoader(jint klass_index, art::mirror::ClassLoader* loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotSourceClassLoader, loader);
+ }
+ void SetJavaDexFile(jint klass_index, art::mirror::Object* dexfile)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotJavaDexFile, dexfile);
+ }
+ void SetNewDexFileCookie(jint klass_index, art::mirror::LongArray* cookie)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotNewDexFileCookie, cookie);
+ }
+ void SetNewDexCache(jint klass_index, art::mirror::DexCache* cache)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotNewDexCache, cache);
}
+ void SetMirrorClass(jint klass_index, art::mirror::Class* klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ SetSlot(klass_index, kSlotMirrorClass, klass);
+ }
+
+ int32_t Length() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return arr_->GetLength() / kNumSlots;
+ }
+
+ private:
+ art::Handle<art::mirror::ObjectArray<art::mirror::Object>> arr_;
+
+ art::mirror::Object* GetSlot(jint klass_index,
+ DataSlot slot) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK_LT(klass_index, Length());
+ return arr_->Get((kNumSlots * klass_index) + slot);
+ }
+
+ void SetSlot(jint klass_index,
+ DataSlot slot,
+ art::ObjPtr<art::mirror::Object> obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK(!art::Runtime::Current()->IsActiveTransaction());
+ DCHECK_LT(klass_index, Length());
+ arr_->Set<false>((kNumSlots * klass_index) + slot, obj);
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(RedefinitionDataHolder);
+};
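+
+// Illustration only: the holder keeps every temporary reachable through a single ObjectArray by
+// flattening (class, slot) pairs into one index. A hedged stand-in for the same layout using plain
+// C++ containers instead of the ART mirror types:
+//
+//   class FlatRedefinitionData {
+//    public:
+//     static constexpr int32_t kNumSlots = 5;  // loader, java DexFile, cookie, DexCache, class.
+//     explicit FlatRedefinitionData(int32_t num_redefinitions)
+//         : storage_(static_cast<size_t>(num_redefinitions) * kNumSlots, nullptr) {}
+//     void* Get(int32_t klass_index, int32_t slot) const {
+//       return storage_[static_cast<size_t>(klass_index) * kNumSlots + slot];
+//     }
+//     void Set(int32_t klass_index, int32_t slot, void* obj) {
+//       storage_[static_cast<size_t>(klass_index) * kNumSlots + slot] = obj;
+//     }
+//     int32_t Length() const { return static_cast<int32_t>(storage_.size()) / kNumSlots; }
+//    private:
+//     std::vector<void*> storage_;
+//   };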
+
+bool Redefiner::CheckAllRedefinitionAreValid() {
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ if (!redef.CheckRedefinitionIsValid()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Redefiner::EnsureAllClassAllocationsFinished() {
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ if (!redef.EnsureClassAllocationsFinished()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Redefiner::FinishAllRemainingAllocations(RedefinitionDataHolder& holder) {
+ int32_t cnt = 0;
+ art::StackHandleScope<4> hs(self_);
+ art::MutableHandle<art::mirror::Object> java_dex_file(hs.NewHandle<art::mirror::Object>(nullptr));
art::MutableHandle<art::mirror::ClassLoader> source_class_loader(
hs.NewHandle<art::mirror::ClassLoader>(nullptr));
- art::MutableHandle<art::mirror::Object> java_dex_file(
- hs.NewHandle<art::mirror::Object>(nullptr));
art::MutableHandle<art::mirror::LongArray> new_dex_file_cookie(
hs.NewHandle<art::mirror::LongArray>(nullptr));
art::MutableHandle<art::mirror::DexCache> new_dex_cache(
hs.NewHandle<art::mirror::DexCache>(nullptr));
- if (!FinishRemainingAllocations(&source_class_loader,
- &java_dex_file,
- &new_dex_file_cookie,
- &new_dex_cache)) {
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ // Reset the out pointers to null
+ source_class_loader.Assign(nullptr);
+ java_dex_file.Assign(nullptr);
+ new_dex_file_cookie.Assign(nullptr);
+ new_dex_cache.Assign(nullptr);
+ // Allocate the data this redefinition requires.
+ if (!redef.FinishRemainingAllocations(&source_class_loader,
+ &java_dex_file,
+ &new_dex_file_cookie,
+ &new_dex_cache)) {
+ return false;
+ }
+ // Save the allocated data into the holder.
+ holder.SetSourceClassLoader(cnt, source_class_loader.Get());
+ holder.SetJavaDexFile(cnt, java_dex_file.Get());
+ holder.SetNewDexFileCookie(cnt, new_dex_file_cookie.Get());
+ holder.SetNewDexCache(cnt, new_dex_cache.Get());
+ holder.SetMirrorClass(cnt, redef.GetMirrorClass());
+ cnt++;
+ }
+ return true;
+}
+
+void Redefiner::ClassRedefinition::ReleaseDexFile() {
+ dex_file_.release();
+}
+
+void Redefiner::ReleaseAllDexFiles() {
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ redef.ReleaseDexFile();
+ }
+}
+
+jvmtiError Redefiner::Run() {
+ art::StackHandleScope<1> hs(self_);
+ // Allocate an array to hold onto all java temporary objects associated with this redefinition.
+ // We will let this be collected after the end of this function.
+ RedefinitionDataHolder holder(&hs, runtime_, self_, redefinitions_.size());
+ if (holder.IsNull()) {
+ self_->AssertPendingOOMException();
+ self_->ClearException();
+ RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate storage for temporaries");
+ return result_;
+ }
+
+ // First we just allocate the ClassExt and its fields that we need. These can be updated
+ // atomically without any issues (since we allocate the map arrays as empty) so we don't bother
+ // doing a try loop. The other allocations we need to ensure that nothing has changed in the time
+ // between allocating them and pausing all threads before we can update them so we need to do a
+ // try loop.
+ if (!CheckAllRedefinitionAreValid() ||
+ !EnsureAllClassAllocationsFinished() ||
+ !FinishAllRemainingAllocations(holder)) {
// TODO Null out the ClassExt fields we allocated (if possible; might be racing with another
// redefineclass call which made it even bigger). Leak shouldn't be huge (2x array of size
- // declared_methods_.length) but would be good to get rid of.
- // new_dex_file_cookie & new_dex_cache should be cleaned up by the GC.
+ // declared_methods_.length) but would be good to get rid of. All other allocations should be
+ // cleaned up by the GC eventually.
return result_;
}
- // Get the mirror class now that we aren't allocating anymore.
- art::Handle<art::mirror::Class> art_class(hs.NewHandle(GetMirrorClass()));
// Disable GC and wait for it to be done if we are a moving GC. This is fine since we are done
// allocating so no deadlocks.
art::gc::Heap* heap = runtime_->GetHeap();
@@ -673,31 +877,29 @@ jvmtiError Redefiner::Run() {
// GC moving objects can cause deadlocks as we are deoptimizing the stack.
heap->IncrementDisableMovingGC(self_);
}
- // Enable assertion that this thread isn't interrupted during this installation.
- // After this we will need to do real cleanup in case of failure. Prior to this we could simply
- // return and would let everything get cleaned up or harmlessly leaked.
// Do transition to final suspension
// TODO We might want to give this its own suspended state!
// TODO This isn't right. We need to change state without any chance of suspend ideally!
self_->TransitionFromRunnableToSuspended(art::ThreadState::kNative);
runtime_->GetThreadList()->SuspendAll(
- "Final installation of redefined Class!", /*long_suspend*/true);
+ "Final installation of redefined Classes!", /*long_suspend*/true);
// TODO We need to invalidate all breakpoints in the redefined class with the debugger.
// TODO We need to deal with any instrumentation/debugger deoptimized_methods_.
// TODO We need to update all debugger MethodIDs so they note the method they point to is
// obsolete or implement some other well defined semantics.
// TODO We need to decide on & implement semantics for JNI jmethodids when we redefine methods.
- // TODO Might want to move this into a different type.
- // Now we reach the part where we must do active cleanup if something fails.
- // TODO We should really Retry if this fails instead of simply aborting.
- // Set the new DexFileCookie returns the original so we can fix it back up if redefinition fails
- art::ObjPtr<art::mirror::LongArray> original_dex_file_cookie(nullptr);
- UpdateJavaDexFile(java_dex_file.Get(), new_dex_file_cookie.Get(), &original_dex_file_cookie);
- FindAndAllocateObsoleteMethods(art_class.Get());
- UpdateClass(art_class.Get(), new_dex_cache.Get());
+ int32_t cnt = 0;
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ art::mirror::Class* klass = holder.GetMirrorClass(cnt);
+ redef.UpdateJavaDexFile(holder.GetJavaDexFile(cnt), holder.GetNewDexFileCookie(cnt));
+ // TODO Rewrite so we don't do a stack walk for each and every class.
+ redef.FindAndAllocateObsoleteMethods(klass);
+ redef.UpdateClass(klass, holder.GetNewDexCache(cnt));
+ cnt++;
+ }
// Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
// pointers to their ArtMethod's stashed in registers that they then use to attempt to hit the
- // DexCache.
+ // DexCache. (b/33630159)
// TODO This can fail (leave some methods optimized) near runtime methods (including
// quick-to-interpreter transition function).
// TODO We probably don't need this at all once we have a way to ensure that the
@@ -705,26 +907,25 @@ jvmtiError Redefiner::Run() {
// stack-walker.
EnsureObsoleteMethodsAreDeoptimized();
// TODO Verify the new Class.
- // TODO Failure then undo updates to class
// TODO Shrink the obsolete method maps if possible?
// TODO find appropriate class loader.
// TODO Put this into a scoped thing.
runtime_->GetThreadList()->ResumeAll();
// Get back shared mutator lock as expected for return.
self_->TransitionFromSuspendedToRunnable();
- // TODO Do the dex_file_ release at a more reasonable place. This works but it muddles who really
- // owns the DexFile.
- dex_file_.release();
+ // TODO Do the dex_file release at a more reasonable place. This works but it muddles who really
+ // owns the DexFile and when ownership is transferred.
+ ReleaseAllDexFiles();
if (heap->IsGcConcurrentAndMoving()) {
heap->DecrementDisableMovingGC(self_);
}
return OK;
}
-void Redefiner::UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
- art::ObjPtr<art::mirror::DexCache> new_dex_cache,
- const art::DexFile::ClassDef& class_def) {
- art::ClassLinker* linker = runtime_->GetClassLinker();
+void Redefiner::ClassRedefinition::UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
+ art::ObjPtr<art::mirror::DexCache> new_dex_cache,
+ const art::DexFile::ClassDef& class_def) {
+ art::ClassLinker* linker = driver_->runtime_->GetClassLinker();
art::PointerSize image_pointer_size = linker->GetImagePointerSize();
const art::DexFile::TypeId& declaring_class_id = dex_file_->GetTypeId(class_def.class_idx_);
const art::DexFile& old_dex_file = mclass->GetDexFile();
@@ -759,14 +960,14 @@ void Redefiner::UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
method.SetDexCacheResolvedMethods(new_dex_cache->GetResolvedMethods(), image_pointer_size);
method.SetDexCacheResolvedTypes(new_dex_cache->GetResolvedTypes(), image_pointer_size);
// Notify the jit that this method is redefined.
- art::jit::Jit* jit = runtime_->GetJit();
+ art::jit::Jit* jit = driver_->runtime_->GetJit();
if (jit != nullptr) {
jit->GetCodeCache()->NotifyMethodRedefined(&method);
}
}
}
-void Redefiner::UpdateFields(art::ObjPtr<art::mirror::Class> mclass) {
+void Redefiner::ClassRedefinition::UpdateFields(art::ObjPtr<art::mirror::Class> mclass) {
// TODO The IFields & SFields pointers should be combined like the methods_ arrays were.
for (auto fields_iter : {mclass->GetIFields(), mclass->GetSFields()}) {
for (art::ArtField& field : fields_iter) {
@@ -787,10 +988,10 @@ void Redefiner::UpdateFields(art::ObjPtr<art::mirror::Class> mclass) {
}
// Performs updates to class that will allow us to verify it.
-void Redefiner::UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
- art::ObjPtr<art::mirror::DexCache> new_dex_cache) {
+void Redefiner::ClassRedefinition::UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
+ art::ObjPtr<art::mirror::DexCache> new_dex_cache) {
const art::DexFile::ClassDef* class_def = art::OatFile::OatDexFile::FindClassDef(
- *dex_file_, class_sig_, art::ComputeModifiedUtf8Hash(class_sig_));
+ *dex_file_, class_sig_.c_str(), art::ComputeModifiedUtf8Hash(class_sig_.c_str()));
DCHECK(class_def != nullptr);
UpdateMethods(mclass, new_dex_cache, *class_def);
UpdateFields(mclass);
@@ -800,12 +1001,12 @@ void Redefiner::UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
// to call GetReturnTypeDescriptor and GetParameterTypeList above).
mclass->SetDexCache(new_dex_cache.Ptr());
mclass->SetDexClassDefIndex(dex_file_->GetIndexForClassDef(*class_def));
- mclass->SetDexTypeIndex(dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(class_sig_)));
+ mclass->SetDexTypeIndex(dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(class_sig_.c_str())));
}
-void Redefiner::UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
- art::ObjPtr<art::mirror::LongArray> new_cookie,
- /*out*/art::ObjPtr<art::mirror::LongArray>* original_cookie) {
+void Redefiner::ClassRedefinition::UpdateJavaDexFile(
+ art::ObjPtr<art::mirror::Object> java_dex_file,
+ art::ObjPtr<art::mirror::LongArray> new_cookie) {
art::ArtField* internal_cookie_field = java_dex_file->GetClass()->FindDeclaredInstanceField(
"mInternalCookie", "Ljava/lang/Object;");
art::ArtField* cookie_field = java_dex_file->GetClass()->FindDeclaredInstanceField(
@@ -816,7 +1017,6 @@ void Redefiner::UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file
art::ObjPtr<art::mirror::LongArray> orig_cookie(
cookie_field->GetObject(java_dex_file)->AsLongArray());
internal_cookie_field->SetObject<false>(java_dex_file, new_cookie);
- *original_cookie = orig_internal_cookie;
if (!orig_cookie.IsNull()) {
cookie_field->SetObject<false>(java_dex_file, new_cookie);
}
@@ -824,21 +1024,22 @@ void Redefiner::UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file
// This function does all (java) allocations we need to do for the Class being redefined.
// TODO Change this name maybe?
-bool Redefiner::EnsureClassAllocationsFinished() {
- art::StackHandleScope<2> hs(self_);
- art::Handle<art::mirror::Class> klass(hs.NewHandle(self_->DecodeJObject(klass_)->AsClass()));
+bool Redefiner::ClassRedefinition::EnsureClassAllocationsFinished() {
+ art::StackHandleScope<2> hs(driver_->self_);
+ art::Handle<art::mirror::Class> klass(hs.NewHandle(
+ driver_->self_->DecodeJObject(klass_)->AsClass()));
if (klass.Get() == nullptr) {
RecordFailure(ERR(INVALID_CLASS), "Unable to decode class argument!");
return false;
}
// Allocate the classExt
- art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->EnsureExtDataPresent(self_)));
+ art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->EnsureExtDataPresent(driver_->self_)));
if (ext.Get() == nullptr) {
// No memory. Clear exception (it's not useful) and return error.
// TODO This doesn't need to be fatal. We could just not support obsolete methods after hitting
// this case.
- self_->AssertPendingOOMException();
- self_->ClearException();
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate ClassExt");
return false;
}
@@ -849,12 +1050,12 @@ bool Redefiner::EnsureClassAllocationsFinished() {
// TODO Clear these after we walk the stacks in order to free them in the (likely?) event there
// are no obsolete methods.
{
- art::ObjectLock<art::mirror::ClassExt> lock(self_, ext);
+ art::ObjectLock<art::mirror::ClassExt> lock(driver_->self_, ext);
if (!ext->ExtendObsoleteArrays(
- self_, klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size())) {
+ driver_->self_, klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size())) {
// OOM. Clear exception and return error.
- self_->AssertPendingOOMException();
- self_->ClearException();
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate/extend obsolete methods map");
return false;
}
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 5852309291..8626bc54d5 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -63,50 +63,154 @@
namespace openjdkjvmti {
+class RedefinitionDataHolder;
+
// Class that can redefine a single class's methods.
// TODO We should really make this be driven by an outside class so we can do multiple classes at
// the same time and have less required cleanup.
class Redefiner {
public:
- // Redefine the given class with the given dex data. Note this function does not take ownership of
- // the dex_data pointer. It is not used after this call however and may be freed if desired.
+ // Redefine the given classes with the given dex data. Note this function does not take ownership
+ // of the dex_data pointers. They are not used after this call and may be freed if desired.
// The caller is responsible for freeing it. The runtime makes its own copy of the data.
- static jvmtiError RedefineClass(ArtJvmTiEnv* env,
- art::Runtime* runtime,
- art::Thread* self,
- jclass klass,
- const std::string& original_dex_location,
- jint data_len,
- unsigned char* dex_data,
- std::string* error_msg);
+ static jvmtiError RedefineClasses(ArtJvmTiEnv* env,
+ art::Runtime* runtime,
+ art::Thread* self,
+ jint class_count,
+ const jvmtiClassDefinition* definitions,
+ std::string* error_msg);
static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable);
private:
+ class ClassRedefinition {
+ public:
+ ClassRedefinition(Redefiner* driver,
+ jclass klass,
+ const art::DexFile* redefined_dex_file,
+ const char* class_sig)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // NO_THREAD_SAFETY_ANALYSIS so we can unlock the class in the destructor.
+ ~ClassRedefinition() NO_THREAD_SAFETY_ANALYSIS;
+
+ // Move constructor so we can put these into a vector.
+ ClassRedefinition(ClassRedefinition&& other)
+ : driver_(other.driver_),
+ klass_(other.klass_),
+ dex_file_(std::move(other.dex_file_)),
+ class_sig_(std::move(other.class_sig_)) {
+ other.driver_ = nullptr;
+ }
+
+ art::mirror::Class* GetMirrorClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ art::mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // This finds the java.lang.DexFile we will add the native DexFile to as part of the classpath.
+ // TODO Make sure the DexFile object returned is the one that the klass_ actually comes from.
+ art::mirror::Object* FindSourceDexFileObject(art::Handle<art::mirror::ClassLoader> loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ art::mirror::DexCache* CreateNewDexCache(art::Handle<art::mirror::ClassLoader> loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // Allocates and fills the new DexFileCookie
+ art::mirror::LongArray* AllocateDexFileCookie(art::Handle<art::mirror::Object> j_dex_file_obj)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void RecordFailure(jvmtiError e, const std::string& err) {
+ driver_->RecordFailure(e, class_sig_, err);
+ }
+
+ bool FinishRemainingAllocations(
+ /*out*/art::MutableHandle<art::mirror::ClassLoader>* source_class_loader,
+ /*out*/art::MutableHandle<art::mirror::Object>* source_dex_file_obj,
+ /*out*/art::MutableHandle<art::mirror::LongArray>* new_dex_file_cookie,
+ /*out*/art::MutableHandle<art::mirror::DexCache>* new_dex_cache)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
+ REQUIRES(art::Locks::mutator_lock_);
+
+ void FillObsoleteMethodMap(
+ art::mirror::Class* art_klass,
+ const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes)
+ REQUIRES(art::Locks::mutator_lock_);
+
+
+ // Checks that the dex file contains only the single expected class and that the top-level class
+ // data has not been modified in an incompatible manner.
+ bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // Preallocates all needed allocations in klass so that we can pause execution safely.
+ // TODO We should be able to free the arrays if they end up not being used. Investigate doing
+ // this in the future. For now we will just take the memory hit.
+ bool EnsureClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // This will check that no constraints are violated (more than 1 class in dex file, any changes
+ // in number/declaration of methods & fields, changes in access flags, etc.)
+ bool CheckRedefinitionIsValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // Checks that the class can even be redefined.
+ bool CheckRedefinable() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ // Checks that the dex file does not add/remove methods.
+ bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ LOG(WARNING) << "methods are not checked for modification currently";
+ return true;
+ }
+
+ // Checks that the dex file does not modify fields
+ bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ LOG(WARNING) << "Fields are not checked for modification currently";
+ return true;
+ }
+
+ void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
+ art::ObjPtr<art::mirror::LongArray> new_cookie)
+ REQUIRES(art::Locks::mutator_lock_);
+
+ void UpdateFields(art::ObjPtr<art::mirror::Class> mclass)
+ REQUIRES(art::Locks::mutator_lock_);
+
+ void UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
+ art::ObjPtr<art::mirror::DexCache> new_dex_cache,
+ const art::DexFile::ClassDef& class_def)
+ REQUIRES(art::Locks::mutator_lock_);
+
+ void UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
+ art::ObjPtr<art::mirror::DexCache> new_dex_cache)
+ REQUIRES(art::Locks::mutator_lock_);
+
+ void ReleaseDexFile() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ private:
+ Redefiner* driver_;
+ jclass klass_;
+ std::unique_ptr<const art::DexFile> dex_file_;
+ std::string class_sig_;
+ };
+
jvmtiError result_;
art::Runtime* runtime_;
art::Thread* self_;
+ std::vector<ClassRedefinition> redefinitions_;
// Kept as a jclass since we have weird run-state changes that make keeping it around as a
// mirror::Class difficult and confusing.
- jclass klass_;
- std::unique_ptr<const art::DexFile> dex_file_;
std::string* error_msg_;
- char* class_sig_;
// TODO Maybe change jclass to a mirror::Class
Redefiner(art::Runtime* runtime,
art::Thread* self,
- jclass klass,
- char* class_sig,
- std::unique_ptr<const art::DexFile>& redefined_dex_file,
std::string* error_msg)
: result_(ERR(INTERNAL)),
runtime_(runtime),
self_(self),
- klass_(klass),
- dex_file_(std::move(redefined_dex_file)),
- error_msg_(error_msg),
- class_sig_(class_sig) { }
+ redefinitions_(),
+ error_msg_(error_msg) { }
+
+ jvmtiError AddRedefinition(ArtJvmTiEnv* env, const jvmtiClassDefinition& def)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
static jvmtiError GetClassRedefinitionError(art::Handle<art::mirror::Class> klass,
/*out*/std::string* error_msg)
@@ -114,23 +218,17 @@ class Redefiner {
static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
jint data_len,
- unsigned char* dex_data,
+ const unsigned char* dex_data,
std::string* error_msg);
// TODO Put on all the lock qualifiers.
jvmtiError Run() REQUIRES_SHARED(art::Locks::mutator_lock_);
- bool FinishRemainingAllocations(
- /*out*/art::MutableHandle<art::mirror::ClassLoader>* source_class_loader,
- /*out*/art::MutableHandle<art::mirror::Object>* source_dex_file_obj,
- /*out*/art::MutableHandle<art::mirror::LongArray>* new_dex_file_cookie,
- /*out*/art::MutableHandle<art::mirror::DexCache>* new_dex_cache)
+ bool CheckAllRedefinitionAreValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool EnsureAllClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool FinishAllRemainingAllocations(RedefinitionDataHolder& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- // Preallocates all needed allocations in klass so that we can pause execution safely.
- // TODO We should be able to free the arrays if they end up not being used. Investigate doing this
- // in the future. For now we will just take the memory hit.
- bool EnsureClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void ReleaseAllDexFiles() REQUIRES_SHARED(art::Locks::mutator_lock_);
// Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
// pointers to their ArtMethods stashed in registers that they then use to attempt to hit the
@@ -140,70 +238,12 @@ class Redefiner {
REQUIRES(!art::Locks::thread_list_lock_,
!art::Locks::classlinker_classes_lock_);
- art::mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- // This finds the java.lang.DexFile we will add the native DexFile to as part of the classpath.
- // TODO Make sure the DexFile object returned is the one that the klass_ actually comes from.
- art::mirror::Object* FindSourceDexFileObject(art::Handle<art::mirror::ClassLoader> loader)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- art::mirror::Class* GetMirrorClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- // Allocates and fills the new DexFileCookie
- art::mirror::LongArray* AllocateDexFileCookie(art::Handle<art::mirror::Object> java_dex_file_obj)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- art::mirror::DexCache* CreateNewDexCache(art::Handle<art::mirror::ClassLoader> loader)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- void RecordFailure(jvmtiError result, const std::string& error_msg);
-
- // This will check that no constraints are violated (more than 1 class in dex file, any changes in
- // number/declaration of methods & fields, changes in access flags, etc.)
- bool CheckRedefinitionIsValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- // Checks that the class can even be redefined.
- bool CheckRedefinable() REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- // Checks that the dex file does not add/remove methods.
- bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_) {
- LOG(WARNING) << "methods are not checked for modification currently";
- return true;
- }
-
- // Checks that the dex file does not modify fields
- bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_) {
- LOG(WARNING) << "Fields are not checked for modification currently";
- return true;
+ void RecordFailure(jvmtiError result, const std::string& class_sig, const std::string& error_msg);
+ void RecordFailure(jvmtiError result, const std::string& error_msg) {
+ RecordFailure(result, "NO CLASS", error_msg);
}
- // Checks that the dex file contains only the single expected class and that the top-level class
- // data has not been modified in an incompatible manner.
- bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
-
- void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
- art::ObjPtr<art::mirror::LongArray> new_cookie,
- /*out*/art::ObjPtr<art::mirror::LongArray>* original_cookie)
- REQUIRES(art::Locks::mutator_lock_);
-
- void UpdateFields(art::ObjPtr<art::mirror::Class> mclass)
- REQUIRES(art::Locks::mutator_lock_);
-
- void UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
- art::ObjPtr<art::mirror::DexCache> new_dex_cache,
- const art::DexFile::ClassDef& class_def)
- REQUIRES(art::Locks::mutator_lock_);
-
- void UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
- art::ObjPtr<art::mirror::DexCache> new_dex_cache)
- REQUIRES(art::Locks::mutator_lock_);
-
- void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
- REQUIRES(art::Locks::mutator_lock_);
-
- void FillObsoleteMethodMap(art::mirror::Class* art_klass,
- const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsoletes)
- REQUIRES(art::Locks::mutator_lock_);
+ friend struct CallbackCtx;
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
index 579fb50ecc..4cf55a6a98 100644
--- a/runtime/openjdkjvmti/ti_stack.cc
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -31,29 +31,37 @@
#include "ti_stack.h"
+#include <algorithm>
+#include <list>
+#include <unordered_map>
+#include <vector>
+
#include "art_jvmti.h"
#include "art_method-inl.h"
+#include "base/bit_utils.h"
#include "base/enums.h"
+#include "base/mutex.h"
#include "dex_file.h"
#include "dex_file_annotations.h"
+#include "handle_scope-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "scoped_thread_state_change-inl.h"
+#include "ScopedLocalRef.h"
#include "stack.h"
-#include "thread.h"
+#include "thread-inl.h"
+#include "thread_list.h"
#include "thread_pool.h"
namespace openjdkjvmti {
struct GetStackTraceVisitor : public art::StackVisitor {
GetStackTraceVisitor(art::Thread* thread_in,
- art::ScopedObjectAccessAlreadyRunnable& soa_,
size_t start_,
size_t stop_)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- soa(soa_),
start(start_),
stop(stop_) {}
@@ -85,7 +93,6 @@ struct GetStackTraceVisitor : public art::StackVisitor {
return true;
}
- art::ScopedObjectAccessAlreadyRunnable& soa;
std::vector<jvmtiFrameInfo> frames;
size_t start;
size_t stop;
@@ -99,10 +106,8 @@ struct GetStackTraceClosure : public art::Closure {
start_result(0),
stop_result(0) {}
- void Run(art::Thread* self) OVERRIDE {
- art::ScopedObjectAccess soa(art::Thread::Current());
-
- GetStackTraceVisitor visitor(self, soa, start_input, stop_input);
+ void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ GetStackTraceVisitor visitor(self, start_input, stop_input);
visitor.WalkStack(false);
frames.swap(visitor.frames);
@@ -118,24 +123,81 @@ struct GetStackTraceClosure : public art::Closure {
size_t stop_result;
};
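+
+// Copies the collected frames into the caller-provided buffer. Usage sketch: with collected
+// frames {f0, f1, f2, f3, f4}, start_depth == -2 and max_frame_count >= 2, the two bottom-most
+// frames f3 and f4 are copied and *count_ptr is set to 2; for start_depth >= 0 the stack walk
+// itself was already limited, so the whole vector is copied verbatim.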
+static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
+ jint start_depth,
+ size_t start_result,
+ jint max_frame_count,
+ jvmtiFrameInfo* frame_buffer,
+ jint* count_ptr) {
+ size_t collected_frames = frames.size();
+
+ // Assume we're here having collected something.
+ DCHECK_GT(max_frame_count, 0);
+
+ // Frames from the top.
+ if (start_depth >= 0) {
+ if (start_result != 0) {
+ // Not enough frames.
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
+ if (frames.size() > 0) {
+ memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
+ }
+ *count_ptr = static_cast<jint>(frames.size());
+ return ERR(NONE);
+ }
+
+ // Frames from the bottom.
+ if (collected_frames < static_cast<size_t>(-start_depth)) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+
+ size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
+ memcpy(frame_buffer,
+ &frames.data()[collected_frames + start_depth],
+ count * sizeof(jvmtiFrameInfo));
+ *count_ptr = static_cast<jint>(count);
+ return ERR(NONE);
+}
+
+static jvmtiError GetThread(JNIEnv* env, jthread java_thread, art::Thread** thread) {
+ if (java_thread == nullptr) {
+ *thread = art::Thread::Current();
+ if (*thread == nullptr) {
+ // GetStackTrace can only be run during the live phase, so the current thread should be
+ // attached and thus available. Getting a null for current means we're starting up or
+ // dying.
+ return ERR(WRONG_PHASE);
+ }
+ } else {
+ if (!env->IsInstanceOf(java_thread, art::WellKnownClasses::java_lang_Thread)) {
+ return ERR(INVALID_THREAD);
+ }
+
+ // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ *thread = art::Thread::FromManagedThread(soa, java_thread);
+ if (*thread == nullptr) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+ }
+ return ERR(NONE);
+}
+
jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
jthread java_thread,
jint start_depth,
jint max_frame_count,
jvmtiFrameInfo* frame_buffer,
jint* count_ptr) {
- if (java_thread == nullptr) {
- return ERR(INVALID_THREAD);
- }
-
art::Thread* thread;
- {
- // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
- art::ScopedObjectAccess soa(art::Thread::Current());
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
- thread = art::Thread::FromManagedThread(soa, java_thread);
- DCHECK(thread != nullptr);
+ jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
+ if (thread_error != ERR(NONE)) {
+ return thread_error;
}
+ DCHECK(thread != nullptr);
art::ThreadState state = thread->GetState();
if (state == art::ThreadState::kStarting ||
@@ -157,35 +219,506 @@ jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
}
GetStackTraceClosure closure(start_depth >= 0 ? static_cast<size_t>(start_depth) : 0,
- start_depth >= 0 ?static_cast<size_t>(max_frame_count) : 0);
+ start_depth >= 0 ? static_cast<size_t>(max_frame_count) : 0);
thread->RequestSynchronousCheckpoint(&closure);
- size_t collected_frames = closure.frames.size();
+ return TranslateFrameVector(closure.frames,
+ start_depth,
+ closure.start_result,
+ max_frame_count,
+ frame_buffer,
+ count_ptr);
+}
- // Frames from the top.
- if (start_depth >= 0) {
- if (closure.start_result != 0) {
- // Not enough frames.
- return ERR(ILLEGAL_ARGUMENT);
+struct GetAllStackTraceClosure : public art::Closure {
+ public:
+ explicit GetAllStackTraceClosure(size_t stop)
+ : start_input(0),
+ stop_input(stop),
+ frames_lock("GetAllStackTraceGuard", art::LockLevel::kAbortLock),
+ start_result(0),
+ stop_result(0) {}
+
+ void Run(art::Thread* self)
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) REQUIRES(!frames_lock) {
+ // self should be live here (so it could be suspended). No need to filter.
+
+ art::Thread* current = art::Thread::Current();
+ std::vector<jvmtiFrameInfo> self_frames;
+
+ GetStackTraceVisitor visitor(self, start_input, stop_input);
+ visitor.WalkStack(false);
+
+ self_frames.swap(visitor.frames);
+
+ art::MutexLock mu(current, frames_lock);
+ frames.emplace(self, self_frames);
+ }
+
+ const size_t start_input;
+ const size_t stop_input;
+
+ art::Mutex frames_lock;
+ std::unordered_map<art::Thread*, std::vector<jvmtiFrameInfo>> frames GUARDED_BY(frames_lock);
+ size_t start_result;
+ size_t stop_result;
+};
+
+
+
+jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
+ jint max_frame_count,
+ jvmtiStackInfo** stack_info_ptr,
+ jint* thread_count_ptr) {
+ if (max_frame_count < 0) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+
+ art::Thread* current = art::Thread::Current();
+ art::ScopedObjectAccess soa(current); // Now we know we have the shared lock.
+ art::ScopedThreadSuspension sts(current, art::kWaitingForDebuggerSuspension);
+ art::ScopedSuspendAll ssa("GetAllStackTraces");
+
+ std::vector<art::Thread*> threads;
+ std::vector<std::vector<jvmtiFrameInfo>> frames;
+ {
+ std::list<art::Thread*> thread_list;
+ {
+ art::MutexLock mu(current, *art::Locks::thread_list_lock_);
+ thread_list = art::Runtime::Current()->GetThreadList()->GetList();
+ }
+
+ for (art::Thread* thread : thread_list) {
+ // Skip threads that are still starting.
+ if (thread->IsStillStarting()) {
+ continue;
+ }
+
+ GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
+ thread->RequestSynchronousCheckpoint(&closure);
+
+ threads.push_back(thread);
+ frames.emplace_back();
+ frames.back().swap(closure.frames);
+ }
+ }
+
+ // Convert the data into our output format. Note: we need to keep the threads suspended,
+ // as we need to access them for their peers.
+
+ // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
+ // allocate one big chunk for this and the actual frames, which means we need
+ // to either be conservative or rearrange things later (the latter is implemented).
+ std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[frames.size()]);
+ std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
+ frame_infos.reserve(frames.size());
+
+ // Now run through and add data for each thread.
+ size_t sum_frames = 0;
+ for (size_t index = 0; index < frames.size(); ++index) {
+ jvmtiStackInfo& stack_info = stack_info_array.get()[index];
+ memset(&stack_info, 0, sizeof(jvmtiStackInfo));
+
+ art::Thread* self = threads[index];
+ const std::vector<jvmtiFrameInfo>& thread_frames = frames[index];
+
+ // For the time being, set the thread to null. We don't have good ScopedLocalRef
+ // infrastructure.
+ DCHECK(self->GetPeer() != nullptr);
+ stack_info.thread = nullptr;
+ stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
+
+ size_t collected_frames = thread_frames.size();
+ if (max_frame_count == 0 || collected_frames == 0) {
+ stack_info.frame_count = 0;
+ stack_info.frame_buffer = nullptr;
+ continue;
}
DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
- if (closure.frames.size() > 0) {
- memcpy(frame_buffer, closure.frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
+
+ jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
+ frame_infos.emplace_back(frame_info);
+
+ jint count;
+ jvmtiError translate_result = TranslateFrameVector(thread_frames,
+ 0,
+ 0,
+ static_cast<jint>(collected_frames),
+ frame_info,
+ &count);
+ DCHECK(translate_result == JVMTI_ERROR_NONE);
+ stack_info.frame_count = static_cast<jint>(collected_frames);
+ stack_info.frame_buffer = frame_info;
+ sum_frames += static_cast<size_t>(count);
+ }
+
+ // No errors, yet. Now put it all into an output buffer.
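+ // The chunk allocated below is laid out as
+ //   [ jvmtiStackInfo x frames.size() | padding to alignof(jvmtiFrameInfo) | jvmtiFrameInfo x sum_frames ]
+ // so the per-thread frame buffers are carved out of the tail of the same allocation.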
+ size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * frames.size(),
+ alignof(jvmtiFrameInfo));
+ size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
+ unsigned char* chunk_data;
+ jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
+ if (alloc_result != ERR(NONE)) {
+ return alloc_result;
+ }
+
+ jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
+ // First copy in all the basic data.
+ memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * frames.size());
+
+ // Now copy the frames and fix up the pointers.
+ jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
+ chunk_data + rounded_stack_info_size);
+ for (size_t i = 0; i < frames.size(); ++i) {
+ jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
+ jvmtiStackInfo& new_stack_info = stack_info[i];
+
+ jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(threads[i]->GetPeer());
+ new_stack_info.thread = thread_peer;
+
+ if (old_stack_info.frame_count > 0) {
+ // Only copy when there's data - leave the nullptr alone.
+ size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
+ memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
+ new_stack_info.frame_buffer = frame_info;
+ frame_info += old_stack_info.frame_count;
}
- *count_ptr = static_cast<jint>(closure.frames.size());
+ }
+
+ *stack_info_ptr = stack_info;
+ *thread_count_ptr = static_cast<jint>(frames.size());
+
+ return ERR(NONE);
+}
+
+jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
+ jint thread_count,
+ const jthread* thread_list,
+ jint max_frame_count,
+ jvmtiStackInfo** stack_info_ptr) {
+ if (max_frame_count < 0) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ if (thread_count < 0) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ if (thread_count == 0) {
+ *stack_info_ptr = nullptr;
return ERR(NONE);
}
+ if (thread_list == nullptr || stack_info_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
- // Frames from the bottom.
- if (collected_frames < static_cast<size_t>(-start_depth)) {
+ art::Thread* current = art::Thread::Current();
+ art::ScopedObjectAccess soa(current); // Now we know we have the shared lock.
+
+ // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
+ art::VariableSizedHandleScope hs(current);
+ std::vector<art::Handle<art::mirror::Object>> handles;
+ for (jint i = 0; i != thread_count; ++i) {
+ if (thread_list[i] == nullptr) {
+ return ERR(INVALID_THREAD);
+ }
+ if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
+ return ERR(INVALID_THREAD);
+ }
+ handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
+ }
+
+ std::vector<art::Thread*> threads;
+ std::vector<size_t> thread_list_indices;
+ std::vector<std::vector<jvmtiFrameInfo>> frames;
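+ // threads[i], thread_list_indices[i] and frames[i] are kept parallel: entry i records the
+ // native thread found for thread_list[thread_list_indices[i]] together with its collected
+ // frames.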
+
+ {
+ art::ScopedThreadSuspension sts(current, art::kWaitingForDebuggerSuspension);
+ art::ScopedSuspendAll ssa("GetThreadListStackTraces");
+
+ {
+ std::list<art::Thread*> art_thread_list;
+ {
+ art::MutexLock mu(current, *art::Locks::thread_list_lock_);
+ art_thread_list = art::Runtime::Current()->GetThreadList()->GetList();
+ }
+
+ for (art::Thread* thread : art_thread_list) {
+ if (thread->IsStillStarting()) {
+ // Skip this. We can't get the jpeer, and if it is for a thread in the thread_list,
+ // we'll just report STARTING.
+ continue;
+ }
+
+ // Get the peer, and check whether we know it.
+ art::ObjPtr<art::mirror::Object> peer = thread->GetPeer();
+ for (size_t index = 0; index != handles.size(); ++index) {
+ if (peer == handles[index].Get()) {
+ // Found the thread.
+ GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
+ thread->RequestSynchronousCheckpoint(&closure);
+
+ threads.push_back(thread);
+ thread_list_indices.push_back(index);
+ frames.emplace_back();
+ frames.back().swap(closure.frames);
+
+ continue;
+ }
+ }
+
+ // Must be not started, or dead. We'll deal with it at the end.
+ }
+ }
+ }
+
+ // Convert the data into our output format.
+
+ // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
+ // allocate one big chunk for this and the actual frames, which means we need
+ // to either be conservative or rearrange things later (the latter is implemented).
+ std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[frames.size()]);
+ std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
+ frame_infos.reserve(frames.size());
+
+ // Now run through and add data for each thread.
+ size_t sum_frames = 0;
+ for (size_t index = 0; index < frames.size(); ++index) {
+ jvmtiStackInfo& stack_info = stack_info_array.get()[index];
+ memset(&stack_info, 0, sizeof(jvmtiStackInfo));
+
+ art::Thread* self = threads[index];
+ const std::vector<jvmtiFrameInfo>& thread_frames = frames[index];
+
+ // For the time being, set the thread to null. We don't have good ScopedLocalRef
+ // infrastructure.
+ DCHECK(self->GetPeer() != nullptr);
+ stack_info.thread = nullptr;
+ stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
+
+ size_t collected_frames = thread_frames.size();
+ if (max_frame_count == 0 || collected_frames == 0) {
+ stack_info.frame_count = 0;
+ stack_info.frame_buffer = nullptr;
+ continue;
+ }
+ DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
+
+ jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
+ frame_infos.emplace_back(frame_info);
+
+ jint count;
+ jvmtiError translate_result = TranslateFrameVector(thread_frames,
+ 0,
+ 0,
+ static_cast<jint>(collected_frames),
+ frame_info,
+ &count);
+ DCHECK(translate_result == JVMTI_ERROR_NONE);
+ stack_info.frame_count = static_cast<jint>(collected_frames);
+ stack_info.frame_buffer = frame_info;
+ sum_frames += static_cast<size_t>(count);
+ }
+
+ // No errors, yet. Now put it all into an output buffer. Note that the entry count here is
+ // thread_count, which may differ from frames.size().
+ size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
+ alignof(jvmtiFrameInfo));
+ size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
+ unsigned char* chunk_data;
+ jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
+ if (alloc_result != ERR(NONE)) {
+ return alloc_result;
+ }
+
+ jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
+ jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
+ chunk_data + rounded_stack_info_size);
+
+ for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
+ // Check whether we found a running thread for this.
+ // Note: For simplicity, and with the expectation that the list is usually small, use a simple
+ // search. (The list is *not* sorted!)
+ auto it = std::find(thread_list_indices.begin(), thread_list_indices.end(), i);
+ if (it == thread_list_indices.end()) {
+ // No native thread. Must be new or dead. We need to fill out the stack info now.
+ // (Need to read the Java "started" field to know whether this is starting or terminated.)
+ art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
+ art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
+ art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
+ CHECK(started_field != nullptr);
+ bool started = started_field->GetBoolean(peer) != 0;
+ constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
+ constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
+ JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
+ stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
+ stack_info[i].state = started ? kTerminatedState : kStartedState;
+ stack_info[i].frame_count = 0;
+ stack_info[i].frame_buffer = nullptr;
+ } else {
+ // Had a native thread and frames.
+ size_t f_index = it - thread_list_indices.begin();
+
+ jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
+ jvmtiStackInfo& new_stack_info = stack_info[i];
+
+ memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
+ new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
+ if (old_stack_info.frame_count > 0) {
+ // Only copy when there's data - leave the nullptr alone.
+ size_t frames_size =
+ static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
+ memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
+ new_stack_info.frame_buffer = frame_info;
+ frame_info += old_stack_info.frame_count;
+ }
+ }
+ }
+
+ *stack_info_ptr = stack_info;
+
+ return ERR(NONE);
+}
+
+// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
+// runtime methods and transitions must not be counted.
+struct GetFrameCountVisitor : public art::StackVisitor {
+ explicit GetFrameCountVisitor(art::Thread* thread)
+ : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ count(0) {}
+
+ bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ArtMethod* m = GetMethod();
+ const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
+ if (do_count) {
+ count++;
+ }
+ return true;
+ }
+
+ size_t count;
+};
+
+struct GetFrameCountClosure : public art::Closure {
+ public:
+ GetFrameCountClosure() : count(0) {}
+
+ void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ GetFrameCountVisitor visitor(self);
+ visitor.WalkStack(false);
+
+ count = visitor.count;
+ }
+
+ size_t count;
+};
+
+jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jthread java_thread,
+ jint* count_ptr) {
+ art::Thread* thread;
+ jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
+ if (thread_error != ERR(NONE)) {
+ return thread_error;
+ }
+ DCHECK(thread != nullptr);
+
+ if (count_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ GetFrameCountClosure closure;
+ thread->RequestSynchronousCheckpoint(&closure);
+
+ *count_ptr = closure.count;
+ return ERR(NONE);
+}
+
+// Walks up the stack 'n' callers, when used with Thread::WalkStack.
+struct GetLocationVisitor : public art::StackVisitor {
+ GetLocationVisitor(art::Thread* thread, size_t n_in)
+ : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ n(n_in),
+ count(0),
+ caller(nullptr),
+ caller_dex_pc(0) {}
+
+ bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ArtMethod* m = GetMethod();
+ const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
+ if (do_count) {
+ DCHECK(caller == nullptr);
+ if (count == n) {
+ caller = m;
+ caller_dex_pc = GetDexPc(false);
+ return false;
+ }
+ count++;
+ }
+ return true;
+ }
+
+ const size_t n;
+ size_t count;
+ art::ArtMethod* caller;
+ uint32_t caller_dex_pc;
+};
+
+struct GetLocationClosure : public art::Closure {
+ public:
+ explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
+
+ void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ GetLocationVisitor visitor(self, n);
+ visitor.WalkStack(false);
+
+ method = visitor.caller;
+ dex_pc = visitor.caller_dex_pc;
+ }
+
+ const size_t n;
+ art::ArtMethod* method;
+ uint32_t dex_pc;
+};
+
+jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jthread java_thread,
+ jint depth,
+ jmethodID* method_ptr,
+ jlocation* location_ptr) {
+ art::Thread* thread;
+ jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
+ if (thread_error != ERR(NONE)) {
+ return thread_error;
+ }
+ DCHECK(thread != nullptr);
+
+ if (depth < 0) {
return ERR(ILLEGAL_ARGUMENT);
}
+ if (method_ptr == nullptr || location_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ GetLocationClosure closure(static_cast<size_t>(depth));
+ thread->RequestSynchronousCheckpoint(&closure);
+
+ if (closure.method == nullptr) {
+ return ERR(NO_MORE_FRAMES);
+ }
+
+ *method_ptr = art::jni::EncodeArtMethod(closure.method);
+ if (closure.method->IsNative()) {
+ *location_ptr = -1;
+ } else {
+ if (closure.dex_pc == art::DexFile::kDexNoIndex) {
+ return ERR(INTERNAL);
+ }
+ *location_ptr = static_cast<jlocation>(closure.dex_pc);
+ }
- size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
- memcpy(frame_buffer,
- &closure.frames.data()[collected_frames + start_depth],
- count * sizeof(jvmtiFrameInfo));
- *count_ptr = static_cast<jint>(count);
return ERR(NONE);
}
diff --git a/runtime/openjdkjvmti/ti_stack.h b/runtime/openjdkjvmti/ti_stack.h
index 1931ed3113..6a593cfa9c 100644
--- a/runtime/openjdkjvmti/ti_stack.h
+++ b/runtime/openjdkjvmti/ti_stack.h
@@ -32,18 +32,41 @@
#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_STACK_H_
#define ART_RUNTIME_OPENJDKJVMTI_TI_STACK_H_
+#include "jni.h"
#include "jvmti.h"
+#include "base/mutex.h"
+
namespace openjdkjvmti {
class StackUtil {
public:
+ static jvmtiError GetAllStackTraces(jvmtiEnv* env,
+ jint max_frame_count,
+ jvmtiStackInfo** stack_info_ptr,
+ jint* thread_count_ptr)
+ REQUIRES(!art::Locks::thread_list_lock_);
+
+ static jvmtiError GetFrameCount(jvmtiEnv* env, jthread thread, jint* count_ptr);
+
+ static jvmtiError GetFrameLocation(jvmtiEnv* env,
+ jthread thread,
+ jint depth,
+ jmethodID* method_ptr,
+ jlocation* location_ptr);
+
static jvmtiError GetStackTrace(jvmtiEnv* env,
jthread thread,
jint start_depth,
jint max_frame_count,
jvmtiFrameInfo* frame_buffer,
jint* count_ptr);
+
+ static jvmtiError GetThreadListStackTraces(jvmtiEnv* env,
+ jint thread_count,
+ const jthread* thread_list,
+ jint max_frame_count,
+ jvmtiStackInfo** stack_info_ptr);
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
new file mode 100644
index 0000000000..2bcdd8cda1
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -0,0 +1,446 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "ti_thread.h"
+
+#include "art_field.h"
+#include "art_jvmti.h"
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "gc/system_weak.h"
+#include "gc_root-inl.h"
+#include "jni_internal.h"
+#include "mirror/class.h"
+#include "mirror/object-inl.h"
+#include "mirror/string.h"
+#include "obj_ptr.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+#include "well_known_classes.h"
+
+namespace openjdkjvmti {
+
+jvmtiError ThreadUtil::GetCurrentThread(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread* thread_ptr) {
+ art::Thread* self = art::Thread::Current();
+
+ art::ScopedObjectAccess soa(self);
+
+ jthread thread_peer;
+ if (self->IsStillStarting()) {
+ thread_peer = nullptr;
+ } else {
+ thread_peer = soa.AddLocalReference<jthread>(self->GetPeer());
+ }
+
+ *thread_ptr = thread_peer;
+ return ERR(NONE);
+}
+
+// Read the context classloader from a Java thread object. This is a lazy implementation
+// that assumes GetThreadInfo isn't called too often. If we instead cache the ArtField,
+// we will have to add synchronization as this can't be cached on startup (which is
+// potentially runtime startup).
+static art::ObjPtr<art::mirror::Object> GetContextClassLoader(art::ObjPtr<art::mirror::Object> peer)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (peer == nullptr) {
+ return nullptr;
+ }
+ art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
+ art::ArtField* cc_field = klass->FindDeclaredInstanceField("contextClassLoader",
+ "Ljava/lang/ClassLoader;");
+ CHECK(cc_field != nullptr);
+ return cc_field->GetObject(peer);
+}
+
+// Get the native thread. The spec says a null object denotes the current thread.
+static art::Thread* GetNativeThread(jthread thread,
+ const art::ScopedObjectAccessAlreadyRunnable& soa)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (thread == nullptr) {
+ return art::Thread::Current();
+ }
+
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ return art::Thread::FromManagedThread(soa, thread);
+}
+
+jvmtiError ThreadUtil::GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr) {
+ if (info_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::ScopedObjectAccess soa(art::Thread::Current());
+
+ art::Thread* self = GetNativeThread(thread, soa);
+ if (self == nullptr && thread == nullptr) {
+ return ERR(INVALID_THREAD);
+ }
+
+ JvmtiUniquePtr name_uptr;
+ if (self != nullptr) {
+ // Have a native thread object, this thread is alive.
+ std::string name;
+ self->GetThreadName(name);
+ jvmtiError name_result = CopyString(
+ env, name.c_str(), reinterpret_cast<unsigned char**>(&info_ptr->name));
+ if (name_result != ERR(NONE)) {
+ return name_result;
+ }
+ name_uptr = MakeJvmtiUniquePtr(env, info_ptr->name);
+
+ info_ptr->priority = self->GetNativePriority();
+
+ info_ptr->is_daemon = self->IsDaemon();
+
+ art::ObjPtr<art::mirror::Object> peer = self->GetPeer();
+
+ // ThreadGroup.
+ if (peer != nullptr) {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_group);
+ CHECK(f != nullptr);
+ art::ObjPtr<art::mirror::Object> group = f->GetObject(peer);
+ info_ptr->thread_group = group == nullptr
+ ? nullptr
+ : soa.AddLocalReference<jthreadGroup>(group);
+ } else {
+ info_ptr->thread_group = nullptr;
+ }
+
+ // Context classloader.
+ art::ObjPtr<art::mirror::Object> ccl = GetContextClassLoader(peer);
+ info_ptr->context_class_loader = ccl == nullptr
+ ? nullptr
+ : soa.AddLocalReference<jobject>(ccl);
+ } else {
+ // Only the peer. This thread has either not been started, or is dead. Read things from
+ // the Java side.
+ art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread);
+
+ // Name.
+ {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_name);
+ CHECK(f != nullptr);
+ art::ObjPtr<art::mirror::Object> name = f->GetObject(peer);
+ std::string name_cpp;
+ const char* name_cstr;
+ if (name != nullptr) {
+ name_cpp = name->AsString()->ToModifiedUtf8();
+ name_cstr = name_cpp.c_str();
+ } else {
+ name_cstr = "";
+ }
+ jvmtiError name_result = CopyString(
+ env, name_cstr, reinterpret_cast<unsigned char**>(&info_ptr->name));
+ if (name_result != ERR(NONE)) {
+ return name_result;
+ }
+ name_uptr = MakeJvmtiUniquePtr(env, info_ptr->name);
+ }
+
+ // Priority.
+ {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_priority);
+ CHECK(f != nullptr);
+ info_ptr->priority = static_cast<jint>(f->GetInt(peer));
+ }
+
+ // Daemon.
+ {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_daemon);
+ CHECK(f != nullptr);
+ info_ptr->is_daemon = f->GetBoolean(peer) == 0 ? JNI_FALSE : JNI_TRUE;
+ }
+
+ // ThreadGroup.
+ {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_group);
+ CHECK(f != nullptr);
+ art::ObjPtr<art::mirror::Object> group = f->GetObject(peer);
+ info_ptr->thread_group = group == nullptr
+ ? nullptr
+ : soa.AddLocalReference<jthreadGroup>(group);
+ }
+
+ // Context classloader.
+ art::ObjPtr<art::mirror::Object> ccl = GetContextClassLoader(peer);
+ info_ptr->context_class_loader = ccl == nullptr
+ ? nullptr
+ : soa.AddLocalReference<jobject>(ccl);
+ }
+
+ name_uptr.release();
+
+ return ERR(NONE);
+}
+
+// Return the given thread's state (or the current thread's, if the argument is null). Return
+// kStarting when there is no native counterpart (the thread has not been started yet, or is dead).
+static art::ThreadState GetNativeThreadState(jthread thread,
+ const art::ScopedObjectAccessAlreadyRunnable& soa,
+ art::Thread** native_thread)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::Thread* self = nullptr;
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ if (thread == nullptr) {
+ self = art::Thread::Current();
+ } else {
+ self = art::Thread::FromManagedThread(soa, thread);
+ }
+ *native_thread = self;
+ if (self == nullptr || self->IsStillStarting()) {
+ return art::ThreadState::kStarting;
+ }
+ return self->GetState();
+}
+
+static jint GetJvmtiThreadStateFromInternal(art::ThreadState internal_thread_state) {
+ jint jvmti_state = JVMTI_THREAD_STATE_ALIVE;
+
+ if (internal_thread_state == art::ThreadState::kSuspended) {
+ jvmti_state |= JVMTI_THREAD_STATE_SUSPENDED;
+ // Note: We do not have data about the previous state. Otherwise we should load the previous
+ // state here.
+ }
+
+ if (internal_thread_state == art::ThreadState::kNative) {
+ jvmti_state |= JVMTI_THREAD_STATE_IN_NATIVE;
+ }
+
+ if (internal_thread_state == art::ThreadState::kRunnable ||
+ internal_thread_state == art::ThreadState::kWaitingWeakGcRootRead ||
+ internal_thread_state == art::ThreadState::kSuspended) {
+ jvmti_state |= JVMTI_THREAD_STATE_RUNNABLE;
+ } else if (internal_thread_state == art::ThreadState::kBlocked) {
+ jvmti_state |= JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
+ } else {
+ // Should be in waiting state.
+ jvmti_state |= JVMTI_THREAD_STATE_WAITING;
+
+ if (internal_thread_state == art::ThreadState::kTimedWaiting ||
+ internal_thread_state == art::ThreadState::kSleeping) {
+ jvmti_state |= JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT;
+ } else {
+ jvmti_state |= JVMTI_THREAD_STATE_WAITING_INDEFINITELY;
+ }
+
+ if (internal_thread_state == art::ThreadState::kSleeping) {
+ jvmti_state |= JVMTI_THREAD_STATE_SLEEPING;
+ }
+
+ if (internal_thread_state == art::ThreadState::kTimedWaiting ||
+ internal_thread_state == art::ThreadState::kWaiting) {
+ jvmti_state |= JVMTI_THREAD_STATE_IN_OBJECT_WAIT;
+ }
+
+ // TODO: PARKED. We'll have to inspect the stack.
+ }
+
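+ // Example: kTimedWaiting maps to ALIVE | WAITING | WAITING_WITH_TIMEOUT | IN_OBJECT_WAIT.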
+ return jvmti_state;
+}
+
+static jint GetJavaStateFromInternal(art::ThreadState internal_thread_state) {
+ switch (internal_thread_state) {
+ case art::ThreadState::kTerminated:
+ return JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
+
+ case art::ThreadState::kRunnable:
+ case art::ThreadState::kNative:
+ case art::ThreadState::kWaitingWeakGcRootRead:
+ case art::ThreadState::kSuspended:
+ return JVMTI_JAVA_LANG_THREAD_STATE_RUNNABLE;
+
+ case art::ThreadState::kTimedWaiting:
+ case art::ThreadState::kSleeping:
+ return JVMTI_JAVA_LANG_THREAD_STATE_TIMED_WAITING;
+
+ case art::ThreadState::kBlocked:
+ return JVMTI_JAVA_LANG_THREAD_STATE_BLOCKED;
+
+ case art::ThreadState::kStarting:
+ return JVMTI_JAVA_LANG_THREAD_STATE_NEW;
+
+ case art::ThreadState::kWaiting:
+ case art::ThreadState::kWaitingForGcToComplete:
+ case art::ThreadState::kWaitingPerformingGc:
+ case art::ThreadState::kWaitingForCheckPointsToRun:
+ case art::ThreadState::kWaitingForDebuggerSend:
+ case art::ThreadState::kWaitingForDebuggerToAttach:
+ case art::ThreadState::kWaitingInMainDebuggerLoop:
+ case art::ThreadState::kWaitingForDebuggerSuspension:
+ case art::ThreadState::kWaitingForDeoptimization:
+ case art::ThreadState::kWaitingForGetObjectsAllocated:
+ case art::ThreadState::kWaitingForJniOnLoad:
+ case art::ThreadState::kWaitingForSignalCatcherOutput:
+ case art::ThreadState::kWaitingInMainSignalCatcherLoop:
+ case art::ThreadState::kWaitingForMethodTracingStart:
+ case art::ThreadState::kWaitingForVisitObjects:
+ case art::ThreadState::kWaitingForGcThreadFlip:
+ return JVMTI_JAVA_LANG_THREAD_STATE_WAITING;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+}
+
+jvmtiError ThreadUtil::GetThreadState(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jthread thread,
+ jint* thread_state_ptr) {
+ if (thread_state_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::Thread* native_thread = nullptr;
+ art::ThreadState internal_thread_state = GetNativeThreadState(thread, soa, &native_thread);
+
+ if (internal_thread_state == art::ThreadState::kStarting) {
+ if (thread == nullptr) {
+ // No native thread, and no Java thread? We must be starting up. Report as wrong phase.
+ return ERR(WRONG_PHASE);
+ }
+
+ // Need to read the Java "started" field to know whether this is starting or terminated.
+ art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread);
+ art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
+ art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
+ CHECK(started_field != nullptr);
+ bool started = started_field->GetBoolean(peer) != 0;
+ constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
+ constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
+ JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
+ *thread_state_ptr = started ? kTerminatedState : kStartedState;
+ return ERR(NONE);
+ }
+ DCHECK(native_thread != nullptr);
+
+ // Translate internal thread state to JVMTI and Java state.
+ jint jvmti_state = GetJvmtiThreadStateFromInternal(internal_thread_state);
+ if (native_thread->IsInterrupted()) {
+ jvmti_state |= JVMTI_THREAD_STATE_INTERRUPTED;
+ }
+
+ // Java state is derived from nativeGetState.
+ // Note: Our implementation maps "suspended" to "runnable", so the resulting mask differs
+ // slightly. This is done for consistency with the Java view.
+ jint java_state = GetJavaStateFromInternal(internal_thread_state);
+
+ *thread_state_ptr = jvmti_state | java_state;
+
+ return ERR(NONE);
+}
+
+jvmtiError ThreadUtil::GetAllThreads(jvmtiEnv* env,
+ jint* threads_count_ptr,
+ jthread** threads_ptr) {
+ if (threads_count_ptr == nullptr || threads_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::Thread* current = art::Thread::Current();
+
+ art::ScopedObjectAccess soa(current);
+
+ art::MutexLock mu(current, *art::Locks::thread_list_lock_);
+ std::list<art::Thread*> thread_list = art::Runtime::Current()->GetThreadList()->GetList();
+
+ std::vector<art::ObjPtr<art::mirror::Object>> peers;
+
+ for (art::Thread* thread : thread_list) {
+ // Skip threads that are still starting.
+ if (thread->IsStillStarting()) {
+ continue;
+ }
+
+ art::ObjPtr<art::mirror::Object> peer = thread->GetPeer();
+ if (peer != nullptr) {
+ peers.push_back(peer);
+ }
+ }
+
+ if (peers.empty()) {
+ *threads_count_ptr = 0;
+ *threads_ptr = nullptr;
+ } else {
+ unsigned char* data;
+ jvmtiError data_result = env->Allocate(peers.size() * sizeof(jthread), &data);
+ if (data_result != ERR(NONE)) {
+ return data_result;
+ }
+ jthread* threads = reinterpret_cast<jthread*>(data);
+ for (size_t i = 0; i != peers.size(); ++i) {
+ threads[i] = soa.AddLocalReference<jthread>(peers[i]);
+ }
+
+ *threads_count_ptr = static_cast<jint>(peers.size());
+ *threads_ptr = threads;
+ }
+ return ERR(NONE);
+}
+
+jvmtiError ThreadUtil::SetThreadLocalStorage(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jthread thread,
+ const void* data) {
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::Thread* self = GetNativeThread(thread, soa);
+ if (self == nullptr && thread == nullptr) {
+ return ERR(INVALID_THREAD);
+ }
+ if (self == nullptr) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+
+ self->SetCustomTLS(data);
+
+ return ERR(NONE);
+}
+
+jvmtiError ThreadUtil::GetThreadLocalStorage(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jthread thread,
+ void** data_ptr) {
+ if (data_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::Thread* self = GetNativeThread(thread, soa);
+ if (self == nullptr && thread == nullptr) {
+ return ERR(INVALID_THREAD);
+ }
+ if (self == nullptr) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+
+ *data_ptr = const_cast<void*>(self->GetCustomTLS());
+ return ERR(NONE);
+}
+
+} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_thread.h b/runtime/openjdkjvmti/ti_thread.h
new file mode 100644
index 0000000000..290e9d49b2
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_thread.h
@@ -0,0 +1,56 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_THREAD_H_
+#define ART_RUNTIME_OPENJDKJVMTI_TI_THREAD_H_
+
+#include "jni.h"
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+class ThreadUtil {
+ public:
+ static jvmtiError GetAllThreads(jvmtiEnv* env, jint* threads_count_ptr, jthread** threads_ptr);
+
+ static jvmtiError GetCurrentThread(jvmtiEnv* env, jthread* thread_ptr);
+
+ static jvmtiError GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr);
+
+ static jvmtiError GetThreadState(jvmtiEnv* env, jthread thread, jint* thread_state_ptr);
+
+ static jvmtiError SetThreadLocalStorage(jvmtiEnv* env, jthread thread, const void* data);
+ static jvmtiError GetThreadLocalStorage(jvmtiEnv* env, jthread thread, void** data_ptr);
+};
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_TI_THREAD_H_
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2086d70791..55e1852c0c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1364,6 +1364,34 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
return true;
}
+static bool EnsureJvmtiPlugin(Runtime* runtime,
+ std::vector<Plugin>* plugins,
+ std::string* error_msg) {
+ constexpr const char* plugin_name = kIsDebugBuild ? "libopenjdkjvmtid.so" : "libopenjdkjvmti.so";
+
+ // Is the plugin already loaded?
+ for (const Plugin& p : *plugins) {
+ if (p.GetLibrary() == plugin_name) {
+ return true;
+ }
+ }
+
+ // Is the process debuggable? Otherwise, do not attempt to load the plugin.
+ if (!runtime->IsDebuggable()) {
+ *error_msg = "Process is not debuggable.";
+ return false;
+ }
+
+ Plugin new_plugin = Plugin::Create(plugin_name);
+
+ if (!new_plugin.Load(error_msg)) {
+ return false;
+ }
+
+ plugins->push_back(std::move(new_plugin));
+ return true;
+}
+
// Attach a new agent and add it to the list of runtime agents
//
// TODO: once we decide on the threading model for agents,
@@ -1371,18 +1399,25 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
// (and we synchronize access to any shared data structures like "agents_")
//
void Runtime::AttachAgent(const std::string& agent_arg) {
+ std::string error_msg;
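+ // Lazily load the JVMTI plugin (debuggable processes only) before attaching the agent.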
+ if (!EnsureJvmtiPlugin(this, &plugins_, &error_msg)) {
+ LOG(WARNING) << "Could not load plugin: " << error_msg;
+ ScopedObjectAccess soa(Thread::Current());
+ ThrowIOException("%s", error_msg.c_str());
+ return;
+ }
+
ti::Agent agent(agent_arg);
int res = 0;
- std::string err;
- ti::Agent::LoadError result = agent.Attach(&res, &err);
+ ti::Agent::LoadError result = agent.Attach(&res, &error_msg);
if (result == ti::Agent::kNoError) {
agents_.push_back(std::move(agent));
} else {
- LOG(ERROR) << "Agent attach failed (result=" << result << ") : " << err;
+ LOG(WARNING) << "Agent attach failed (result=" << result << ") : " << error_msg;
ScopedObjectAccess soa(Thread::Current());
- ThrowWrappedIOException("%s", err.c_str());
+ ThrowIOException("%s", error_msg.c_str());
}
}
@@ -2195,6 +2230,8 @@ void Runtime::AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder) {
gc::ScopedGCCriticalSection gcs(Thread::Current(),
gc::kGcCauseAddRemoveSystemWeakHolder,
gc::kCollectorTypeAddRemoveSystemWeakHolder);
+ // Note: The ScopedGCCriticalSection also ensures that the rest of the function is in
+ // a critical section.
system_weak_holders_.push_back(holder);
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a4f063141b..40b6d73d94 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2629,10 +2629,9 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pAllocArray)
QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck)
- QUICK_ENTRY_POINT_INFO(pAllocObject)
QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
- QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck)
+ QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks)
QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray)
QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck)
QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes)
@@ -2728,6 +2727,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
+ QUICK_ENTRY_POINT_INFO(pInvokePolymorphic)
QUICK_ENTRY_POINT_INFO(pTestSuspend)
QUICK_ENTRY_POINT_INFO(pDeliverException)
QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
diff --git a/runtime/thread.h b/runtime/thread.h
index c7acfc4c9c..3958c10d57 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1140,6 +1140,14 @@ class Thread {
return debug_disallow_read_barrier_;
}
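+
+ // Opaque thread-local slot used by plugins; see ThreadUtil::GetThreadLocalStorage and
+ // ThreadUtil::SetThreadLocalStorage in the openjdkjvmti plugin.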
+ const void* GetCustomTLS() const {
+ return custom_tls_;
+ }
+
+ void SetCustomTLS(const void* data) {
+ custom_tls_ = data;
+ }
+
// Returns true if the current thread is the jit sensitive thread.
bool IsJitSensitiveThread() const {
return this == jit_sensitive_thread_;
@@ -1546,6 +1554,7 @@ class Thread {
// potentially better performance.
uint8_t* thread_local_pos;
uint8_t* thread_local_end;
+
// Thread-local allocation pointer.
uint8_t* thread_local_start;
@@ -1599,6 +1608,10 @@ class Thread {
// Pending extra checkpoints if checkpoint_function_ is already used.
std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
+ // Custom TLS field that can be used by plugins.
+ // TODO: Generalize once we have more plugins.
+ const void* custom_tls_;
+
// True if the thread is allowed to call back into java (for e.g. during class resolution).
// By default this is true.
bool can_call_into_java_;
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 715b237a6a..25a179bd32 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3106,19 +3106,16 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
break;
}
const uint32_t proto_idx = (is_range) ? inst->VRegH_4rcc() : inst->VRegH_45cc();
- const char* descriptor =
+ const char* return_descriptor =
dex_file_->GetReturnTypeDescriptor(dex_file_->GetProtoId(proto_idx));
const RegType& return_type =
- reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
+ reg_types_.FromDescriptor(GetClassLoader(), return_descriptor, false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(this, return_type);
} else {
work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
}
- // TODO(oth): remove when compiler support is available.
- Fail(VERIFY_ERROR_FORCE_INTERPRETER)
- << "invoke-polymorphic is not supported by compiler";
- have_pending_experimental_failure_ = true;
+ just_set_result = true;
break;
}
case Instruction::NEG_INT:
diff --git a/test/129-ThreadGetId/expected.txt b/test/129-ThreadGetId/expected.txt
index 134d8d0b47..aadf90d9d7 100644
--- a/test/129-ThreadGetId/expected.txt
+++ b/test/129-ThreadGetId/expected.txt
@@ -1 +1,2 @@
+HeapTaskDaemon depth 0
Finishing
diff --git a/test/129-ThreadGetId/src/Main.java b/test/129-ThreadGetId/src/Main.java
index 9934bba95f..6ba01ff0b5 100644
--- a/test/129-ThreadGetId/src/Main.java
+++ b/test/129-ThreadGetId/src/Main.java
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+import java.lang.reflect.Field;
import java.util.Map;
public class Main implements Runnable {
@@ -22,6 +23,7 @@ public class Main implements Runnable {
public static void main(String[] args) throws Exception {
final Thread[] threads = new Thread[numberOfThreads];
+ test_getStackTraces();
for (int t = 0; t < threads.length; t++) {
threads[t] = new Thread(new Main());
threads[t].start();
@@ -32,6 +34,43 @@ public class Main implements Runnable {
System.out.println("Finishing");
}
+ static Thread getHeapTaskDaemon() throws Exception {
+ Field f = ThreadGroup.class.getDeclaredField("systemThreadGroup");
+ f.setAccessible(true);
+ ThreadGroup systemThreadGroup = (ThreadGroup) f.get(null);
+
+ while (true) {
+ int activeCount = systemThreadGroup.activeCount();
+ Thread[] array = new Thread[activeCount];
+ systemThreadGroup.enumerate(array);
+ for (Thread thread : array) {
+ if (thread.getName().equals("HeapTaskDaemon") &&
+ thread.getState() != Thread.State.NEW) {
+ return thread;
+ }
+ }
+ // Yield to eventually get the daemon started.
+ Thread.sleep(10);
+ }
+ }
+
+ static void test_getStackTraces() throws Exception {
+ Thread heapDaemon = getHeapTaskDaemon();
+
+ // Force a GC to ensure the daemon truly started.
+ Runtime.getRuntime().gc();
+ // Check all the current threads for positive IDs.
+ Map<Thread, StackTraceElement[]> map = Thread.getAllStackTraces();
+ for (Map.Entry<Thread, StackTraceElement[]> pair : map.entrySet()) {
+ Thread thread = pair.getKey();
+ // Expect empty stack trace since we do not support suspending the GC thread for
+ // obtaining stack traces. See b/28261069.
+ if (thread == heapDaemon) {
+ System.out.println(thread.getName() + " depth " + pair.getValue().length);
+ }
+ }
+ }
+
public void test_getId() {
if (Thread.currentThread().getId() <= 0) {
System.out.println("current thread's ID is not positive");
diff --git a/test/494-checker-instanceof-tests/src/Main.java b/test/494-checker-instanceof-tests/src/Main.java
index 2eac6c92a5..acd2305f67 100644
--- a/test/494-checker-instanceof-tests/src/Main.java
+++ b/test/494-checker-instanceof-tests/src/Main.java
@@ -142,11 +142,11 @@ public class Main {
/// CHECK: LoadClass
/// CHECK: Return [<<Const>>]
public static boolean knownTestWithUnloadedClass() {
- return $inline$returnMain() instanceof String;
+ return $inline$returnUnrelated() instanceof String;
}
- public static Object $inline$returnMain() {
- return new Main();
+ public static Object $inline$returnUnrelated() {
+ return new Unrelated();
}
public static void expect(boolean expected, boolean actual) {
diff --git a/test/496-checker-inlining-class-loader/src/Main.java b/test/496-checker-inlining-class-loader/src/Main.java
index 15d4dc07bc..5deb77f29e 100644
--- a/test/496-checker-inlining-class-loader/src/Main.java
+++ b/test/496-checker-inlining-class-loader/src/Main.java
@@ -82,10 +82,10 @@ class MyClassLoader extends ClassLoader {
class LoadedByMyClassLoader {
/// CHECK-START: void LoadedByMyClassLoader.bar() inliner (before)
- /// CHECK: LoadClass
+ /// CHECK: LoadClass class_name:FirstSeenByMyClassLoader
/// CHECK-NEXT: ClinitCheck
/// CHECK-NEXT: InvokeStaticOrDirect
- /// CHECK-NEXT: LoadClass
+ /// CHECK-NEXT: LoadClass class_name:java.lang.System
/// CHECK-NEXT: ClinitCheck
/// CHECK-NEXT: StaticFieldGet
/// CHECK-NEXT: LoadString
@@ -93,10 +93,10 @@ class LoadedByMyClassLoader {
/// CHECK-NEXT: InvokeVirtual
/// CHECK-START: void LoadedByMyClassLoader.bar() inliner (after)
- /// CHECK: LoadClass
+ /// CHECK: LoadClass class_name:FirstSeenByMyClassLoader
/// CHECK-NEXT: ClinitCheck
/* We inlined FirstSeenByMyClassLoader.$inline$bar */
- /// CHECK-NEXT: LoadClass
+ /// CHECK-NEXT: LoadClass class_name:java.lang.System
/// CHECK-NEXT: ClinitCheck
/// CHECK-NEXT: StaticFieldGet
/// CHECK-NEXT: LoadString
@@ -105,12 +105,15 @@ class LoadedByMyClassLoader {
/// CHECK-START: void LoadedByMyClassLoader.bar() register (before)
/* Load and initialize FirstSeenByMyClassLoader */
- /// CHECK: LoadClass gen_clinit_check:true
+ /// CHECK: LoadClass class_name:FirstSeenByMyClassLoader gen_clinit_check:true
/* Load and initialize System */
// There may be MipsComputeBaseMethodAddress here.
- /// CHECK: LoadClass gen_clinit_check:true
- /// CHECK-NEXT: StaticFieldGet
- // There may be HArmDexCacheArraysBase or HX86ComputeBaseMethodAddress here.
+ /// CHECK: LoadClass class_name:java.lang.System
+ // The ClinitCheck may (PIC) or may not (non-PIC) be merged into the LoadClass.
+ // (The merging checks for environment match but HLoadClass/kBootImageAddress
+ // used for non-PIC mode does not have an environment at all.)
+ /// CHECK: StaticFieldGet
+ // There may be HX86ComputeBaseMethodAddress or MipsComputeBaseMethodAddress here.
/// CHECK: LoadString
/// CHECK-NEXT: NullCheck
/// CHECK-NEXT: InvokeVirtual
diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java
index 5fd51e1dca..89b9cb45c3 100644
--- a/test/529-checker-unresolved/src/Main.java
+++ b/test/529-checker-unresolved/src/Main.java
@@ -192,13 +192,13 @@ public class Main extends UnresolvedSuperClass {
/// CHECK-START: void Main.testLicm(int) licm (before)
/// CHECK: <<Class:l\d+>> LoadClass loop:B2
/// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:B2
- /// CHECK-NEXT: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2
+ /// CHECK-NEXT: <<New:l\d+>> NewInstance [<<Clinit>>] loop:B2
/// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2
/// CHECK-START: void Main.testLicm(int) licm (after)
/// CHECK: <<Class:l\d+>> LoadClass loop:none
/// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:none
- /// CHECK: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2
+ /// CHECK: <<New:l\d+>> NewInstance [<<Clinit>>] loop:B2
/// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2
static public void testLicm(int count) {
// Test to make sure we keep the initialization check after loading an unresolved class.
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index fe6ff13628..db437686f0 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -331,32 +331,32 @@ public class Main {
/// CHECK-START-X86: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
/// CHECK-START-X86_64: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
/// CHECK-START-ARM: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
/// CHECK-START-ARM64: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
/// CHECK-START-MIPS: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
/// CHECK-START-MIPS64: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
// Note: load kind depends on PIC/non-PIC
// TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
- /// CHECK: LoadClass load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} class_name:java.lang.String
public static Class<?> $noinline$getStringClass() {
// Prevent inlining to avoid the string comparison being optimized away.
@@ -369,34 +369,34 @@ public class Main {
/// CHECK: LoadClass load_kind:DexCacheViaMethod class_name:Other
/// CHECK-START-X86: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
- /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-X86: java.lang.Class Main.$noinline$getOtherClass() pc_relative_fixups_x86 (after)
/// CHECK-DAG: X86ComputeBaseMethodAddress
- /// CHECK-DAG: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK-DAG: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-X86_64: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
- /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-ARM: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
- /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-ARM: java.lang.Class Main.$noinline$getOtherClass() dex_cache_array_fixups_arm (after)
/// CHECK-DAG: ArmDexCacheArraysBase
- /// CHECK-DAG: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK-DAG: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-ARM64: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
- /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-MIPS: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
- /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-MIPS: java.lang.Class Main.$noinline$getOtherClass() dex_cache_array_fixups_mips (after)
/// CHECK-DAG: MipsDexCacheArraysBase
- /// CHECK-DAG: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK-DAG: LoadClass load_kind:BssEntry class_name:Other
/// CHECK-START-MIPS64: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
- /// CHECK: LoadClass load_kind:DexCachePcRelative class_name:Other
+ /// CHECK: LoadClass load_kind:BssEntry class_name:Other
public static Class<?> $noinline$getOtherClass() {
// Prevent inlining to avoid the string comparison being optimized away.
diff --git a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
index af43973073..a30a11afb7 100644
--- a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
+++ b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
@@ -196,7 +196,7 @@
const-class v0, LMain;
if-ne v0, v2, :exit
:other_loop_entry
- const-class v1, Ljava/lang/Class; # LoadClass that can throw
+ const-class v1, LOther; # LoadClass that can throw
goto :loop_entry
:exit
return-object v0
@@ -250,7 +250,7 @@
const/4 v0, 0
if-ne p0, v0, :other_loop_entry
:loop_entry
- const-class v1, Ljava/lang/Class; # LoadClass that can throw
+ const-class v1, LOther; # LoadClass that can throw
if-ne v0, p0, :exit
:other_loop_entry
sub-int v1, p0, p0
@@ -286,7 +286,7 @@
.method public static licm3(III)I
.registers 4
:loop_entry
- const-class v0, Ljava/lang/Class; # LoadClass that can throw
+ const-class v0, LOther; # LoadClass that can throw
if-ne p1, p2, :exit
goto :loop_body
diff --git a/test/559-checker-irreducible-loop/src/Main.java b/test/559-checker-irreducible-loop/src/Main.java
index ab84f81291..023e7695fe 100644
--- a/test/559-checker-irreducible-loop/src/Main.java
+++ b/test/559-checker-irreducible-loop/src/Main.java
@@ -67,3 +67,6 @@ public class Main {
int myField;
}
+
+class Other {
+}
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 00c1b02675..b75becffcb 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -35,8 +35,9 @@ static void do_checks(jclass cls, const char* method_name) {
OatQuickMethodHeader* header = nullptr;
// Infinite loop... Test harness will have its own timeout.
while (true) {
- header = OatQuickMethodHeader::FromEntryPoint(method->GetEntryPointFromQuickCompiledCode());
- if (code_cache->ContainsPc(header->GetCode())) {
+ const void* pc = method->GetEntryPointFromQuickCompiledCode();
+ if (code_cache->ContainsPc(pc)) {
+ header = OatQuickMethodHeader::FromEntryPoint(pc);
break;
} else {
// Sleep to yield to the compiler thread.
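
The reordering above matters presumably because the method's entry point is not JIT code until the compiler thread has installed it, so treating an arbitrary entry point (e.g. a stub) as an OatQuickMethodHeader is not meaningful. Condensed, the post-patch polling loop reads as below (sketch; the actual sleep call sits outside the hunk).

    OatQuickMethodHeader* header = nullptr;
    while (true) {
      const void* pc = method->GetEntryPointFromQuickCompiledCode();
      if (code_cache->ContainsPc(pc)) {
        // Only now is it valid to interpret the entry point as JIT-compiled code.
        header = OatQuickMethodHeader::FromEntryPoint(pc);
        break;
      } else {
        // Sleep to yield to the compiler thread (call elided in this sketch).
      }
    }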
diff --git a/test/621-checker-new-instance/expected.txt b/test/621-checker-new-instance/expected.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/621-checker-new-instance/expected.txt
+++ /dev/null
diff --git a/test/621-checker-new-instance/info.txt b/test/621-checker-new-instance/info.txt
deleted file mode 100644
index c27c45ca7f..0000000000
--- a/test/621-checker-new-instance/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Tests for removing useless load class.
diff --git a/test/621-checker-new-instance/src/Main.java b/test/621-checker-new-instance/src/Main.java
deleted file mode 100644
index 68a46449f0..0000000000
--- a/test/621-checker-new-instance/src/Main.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
- /// CHECK-START: java.lang.Object Main.newObject() prepare_for_register_allocation (before)
- /// CHECK: LoadClass
- /// CHECK: NewInstance
-
- /// CHECK-START: java.lang.Object Main.newObject() prepare_for_register_allocation (after)
- /// CHECK-NOT: LoadClass
- /// CHECK: NewInstance
- public static Object newObject() {
- return new Object();
- }
-
- /// CHECK-START: java.lang.Object Main.newFinalizableMayThrow() prepare_for_register_allocation (after)
- /// CHECK: LoadClass
- /// CHECK: NewInstance
- public static Object newFinalizableMayThrow() {
- return $inline$newFinalizableMayThrow();
- }
-
- public static Object $inline$newFinalizableMayThrow() {
- return new FinalizableMayThrow();
- }
-
- public static void main(String[] args) {
- newFinalizableMayThrow();
- newObject();
- }
-}
-
-class FinalizableMayThrow {
- // clinit may throw OOME.
- static Object o = new Object();
- static String s;
- public void finalize() {
- s = "Test";
- }
-}
diff --git a/test/627-checker-unroll/expected.txt b/test/627-checker-unroll/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/627-checker-unroll/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/627-checker-unroll/info.txt b/test/627-checker-unroll/info.txt
new file mode 100644
index 0000000000..d7885f4bbc
--- /dev/null
+++ b/test/627-checker-unroll/info.txt
@@ -0,0 +1 @@
+Test on loop unrolling.
diff --git a/test/627-checker-unroll/src/Main.java b/test/627-checker-unroll/src/Main.java
new file mode 100644
index 0000000000..9785bdc58b
--- /dev/null
+++ b/test/627-checker-unroll/src/Main.java
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Test on loop unrolling. Removes loop control overhead (including suspend
+// checks) and exposes more opportunities for constant folding.
+//
+public class Main {
+
+ static int sA = 0;
+
+ /// CHECK-START: void Main.unroll() loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>>
+ /// CHECK-DAG: StaticFieldSet loop:<<Loop>>
+ //
+ /// CHECK-START: void Main.unroll() loop_optimization (after)
+ /// CHECK-DAG: StaticFieldSet loop:none
+ //
+ /// CHECK-START: void Main.unroll() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 68 loop:none
+ /// CHECK-DAG: StaticFieldSet [{{l\d+}},<<Int>>] loop:none
+ //
+ /// CHECK-START: void Main.unroll() loop_optimization (after)
+ /// CHECK-NOT: Phi
+ public static void unroll() {
+ for (int i = 4; i < 5; i++) {
+ sA = 17 * i;
+ }
+ }
+
+ /// CHECK-START: int Main.unrollLV() loop_optimization (before)
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>>
+ /// CHECK-DAG: StaticFieldSet loop:<<Loop>>
+ /// CHECK-DAG: Return [<<Phi>>] loop:none
+ //
+ /// CHECK-START: int Main.unrollLV() loop_optimization (after)
+ /// CHECK-DAG: StaticFieldSet loop:none
+ //
+ /// CHECK-START: int Main.unrollLV() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int1:i\d+>> IntConstant 187 loop:none
+ /// CHECK-DAG: <<Int2:i\d+>> IntConstant 12 loop:none
+ /// CHECK-DAG: StaticFieldSet [{{l\d+}},<<Int1>>] loop:none
+ /// CHECK-DAG: Return [<<Int2>>] loop:none
+ //
+ /// CHECK-START: int Main.unrollLV() loop_optimization (after)
+ /// CHECK-NOT: Phi
+ public static int unrollLV() {
+ int i;
+ for (i = 11; i < 12; i++) {
+ sA = 17 * i;
+ }
+ return i;
+ }
+
+ /// CHECK-START: void Main.unrollNest() loop_optimization (before)
+ /// CHECK-DAG: SuspendCheck loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: SuspendCheck loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:<<Loop1>>
+ /// CHECK-DAG: SuspendCheck loop:<<Loop2>> outer_loop:<<Loop1>>
+ /// CHECK-DAG: <<Phi3:i\d+>> Phi loop:<<Loop3:B\d+>> outer_loop:<<Loop2>>
+ /// CHECK-DAG: SuspendCheck loop:<<Loop3>> outer_loop:<<Loop2>>
+ /// CHECK-DAG: StaticFieldSet loop:<<Loop3>> outer_loop:<<Loop2>>
+ //
+ /// CHECK-START: void Main.unrollNest() loop_optimization (after)
+ /// CHECK-DAG: StaticFieldSet loop:none
+ /// CHECK-DAG: SuspendCheck loop:none
+ /// CHECK-NOT: SuspendCheck
+ //
+ /// CHECK-START: void Main.unrollNest() instruction_simplifier$after_bce (after)
+ /// CHECK-DAG: <<Int:i\d+>> IntConstant 6 loop:none
+ /// CHECK-DAG: StaticFieldSet [{{l\d+}},<<Int>>] loop:none
+ //
+ /// CHECK-START: void Main.unrollNest() loop_optimization (after)
+ /// CHECK-NOT: Phi
+ public static void unrollNest() {
+ // Unrolling each loop in turn ultimately removes the complete nest!
+ for (int i = 4; i < 5; i++) {
+ for (int j = 5; j < 6; j++) {
+ for (int k = 6; k < 7; k++) {
+ sA = k;
+ }
+ }
+ }
+ }
+
+ //
+ // Verifier.
+ //
+
+ public static void main(String[] args) {
+ unroll();
+ expectEquals(68, sA);
+ expectEquals(12, unrollLV());
+ expectEquals(187, sA);
+ unrollNest();
+ expectEquals(6, sA);
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/633-checker-rtp-getclass/expected.txt b/test/633-checker-rtp-getclass/expected.txt
new file mode 100644
index 0000000000..a178d0414b
--- /dev/null
+++ b/test/633-checker-rtp-getclass/expected.txt
@@ -0,0 +1,3 @@
+2
+3
+6
diff --git a/test/633-checker-rtp-getclass/info.txt b/test/633-checker-rtp-getclass/info.txt
new file mode 100644
index 0000000000..e98a0acbbb
--- /dev/null
+++ b/test/633-checker-rtp-getclass/info.txt
@@ -0,0 +1,3 @@
+Regression test for the RTP pass of the compiler, which
+used the wrong block when bounding a type after an obj.getClass()
+check.
diff --git a/test/633-checker-rtp-getclass/src/Main.java b/test/633-checker-rtp-getclass/src/Main.java
new file mode 100644
index 0000000000..f29c139f63
--- /dev/null
+++ b/test/633-checker-rtp-getclass/src/Main.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ System.out.println($opt$noinline$foo(new Main()));
+ System.out.println($opt$noinline$foo(new SubMain()));
+ System.out.println($opt$noinline$foo(new SubSubMain()));
+ }
+
+
+ // Checker test to make sure the only inlined instruction is
+ // SubMain.bar.
+ /// CHECK-START: int Main.$opt$noinline$foo(Main) inliner (after)
+ /// CHECK-DAG: InvokeVirtual method_name:Main.foo
+ /// CHECK-DAG: <<Const:i\d+>> IntConstant 3
+ /// CHECK: begin_block
+ /// CHECK: BoundType klass:SubMain
+ /// CHECK: Return [<<Const>>]
+ /// CHECK-NOT: begin_block
+ /// CHECK: end_block
+ public static int $opt$noinline$foo(Main o) {
+ if (doThrow) { throw new Error(); }
+ // To exercise the bug on Jack, we need two getClass compares.
+ if (o.getClass() == Main.class || o.getClass() != SubMain.class) {
+ return o.foo();
+ } else {
+ // We used to wrongly bound the type of o to `Main` here and then realize that's
+ // impossible and mark this branch as dead.
+ return o.bar();
+ }
+ }
+
+ public int bar() {
+ return 1;
+ }
+
+ public int foo() {
+ return 2;
+ }
+
+ public static boolean doThrow = false;
+}
+
+class SubMain extends Main {
+ public int bar() {
+ return 3;
+ }
+
+ public int foo() {
+ return 4;
+ }
+}
+
+class SubSubMain extends SubMain {
+ public int bar() {
+ return 5;
+ }
+
+ public int foo() {
+ return 6;
+ }
+}
diff --git a/test/903-hello-tagging/tagging.cc b/test/903-hello-tagging/tagging.cc
index 60a31bdd2d..f74c1fc2ea 100644
--- a/test/903-hello-tagging/tagging.cc
+++ b/test/903-hello-tagging/tagging.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "tagging.h"
-
#include <iostream>
#include <pthread.h>
#include <stdio.h>
@@ -141,18 +139,6 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getTaggedObjects(JNIEnv* env
return resultArray;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test903HelloTagging
} // namespace art
diff --git a/test/903-hello-tagging/tagging.h b/test/903-hello-tagging/tagging.h
deleted file mode 100644
index f062d44880..0000000000
--- a/test/903-hello-tagging/tagging.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_903_HELLO_TAGGING_TAGGING_H_
-#define ART_TEST_903_HELLO_TAGGING_TAGGING_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test903HelloTagging {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test903HelloTagging
-} // namespace art
-
-#endif // ART_TEST_903_HELLO_TAGGING_TAGGING_H_
diff --git a/test/904-object-allocation/tracking.cc b/test/904-object-allocation/tracking.cc
index f993606b42..95eab0c6cc 100644
--- a/test/904-object-allocation/tracking.cc
+++ b/test/904-object-allocation/tracking.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "tracking.h"
-
#include <iostream>
#include <pthread.h>
#include <stdio.h>
@@ -89,19 +87,6 @@ extern "C" JNIEXPORT void JNICALL Java_Main_enableAllocationTracking(JNIEnv* env
}
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- jvmti_env->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_OBJECT_ALLOC, nullptr);
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test904ObjectAllocation
} // namespace art
diff --git a/test/905-object-free/tracking_free.cc b/test/905-object-free/tracking_free.cc
index 7f295accb2..7b26d79edb 100644
--- a/test/905-object-free/tracking_free.cc
+++ b/test/905-object-free/tracking_free.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "tracking_free.h"
-
#include <iostream>
#include <pthread.h>
#include <stdio.h>
@@ -82,17 +80,5 @@ extern "C" JNIEXPORT jlongArray JNICALL Java_Main_getCollectedTags(JNIEnv* env,
return ret;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test905ObjectFree
} // namespace art
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index a2fd59128f..1362d470e6 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "iterate_heap.h"
-
#include <iostream>
#include <pthread.h>
#include <stdio.h>
@@ -174,17 +172,5 @@ extern "C" JNIEXPORT void JNICALL Java_Main_iterateThroughHeapAdd(JNIEnv* env AT
Run(heap_filter, klass_filter, &config);
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test906IterateHeap
} // namespace art
diff --git a/test/906-iterate-heap/iterate_heap.h b/test/906-iterate-heap/iterate_heap.h
deleted file mode 100644
index f25cdbaf49..0000000000
--- a/test/906-iterate-heap/iterate_heap.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_906_ITERATE_HEAP_ITERATE_HEAP_H_
-#define ART_TEST_906_ITERATE_HEAP_ITERATE_HEAP_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test906IterateHeap {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test906IterateHeap
-} // namespace art
-
-#endif // ART_TEST_906_ITERATE_HEAP_ITERATE_HEAP_H_
diff --git a/test/907-get-loaded-classes/get_loaded_classes.cc b/test/907-get-loaded-classes/get_loaded_classes.cc
index 36d33b63cc..5bda7ebac8 100644
--- a/test/907-get-loaded-classes/get_loaded_classes.cc
+++ b/test/907-get-loaded-classes/get_loaded_classes.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "get_loaded_classes.h"
-
#include <iostream>
#include <pthread.h>
#include <stdio.h>
@@ -65,17 +63,5 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getLoadedClasses(
return ret;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test907GetLoadedClasses
} // namespace art
diff --git a/test/907-get-loaded-classes/get_loaded_classes.h b/test/907-get-loaded-classes/get_loaded_classes.h
deleted file mode 100644
index 4d27f898cc..0000000000
--- a/test/907-get-loaded-classes/get_loaded_classes.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
-#define ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test907GetLoadedClasses {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test907GetLoadedClasses
-} // namespace art
-
-#endif // ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
diff --git a/test/908-gc-start-finish/gc_callbacks.cc b/test/908-gc-start-finish/gc_callbacks.cc
index 1fab79dcb1..59801ff648 100644
--- a/test/908-gc-start-finish/gc_callbacks.cc
+++ b/test/908-gc-start-finish/gc_callbacks.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "gc_callbacks.h"
-
#include <stdio.h>
#include <string.h>
@@ -94,17 +92,5 @@ extern "C" JNIEXPORT jint JNICALL Java_Main_getGcFinishes(JNIEnv* env ATTRIBUTE_
return result;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test908GcStartFinish
} // namespace art
diff --git a/test/908-gc-start-finish/gc_callbacks.h b/test/908-gc-start-finish/gc_callbacks.h
deleted file mode 100644
index 177a4eb7b2..0000000000
--- a/test/908-gc-start-finish/gc_callbacks.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_908_GC_START_FINISH_GC_CALLBACKS_H_
-#define ART_TEST_908_GC_START_FINISH_GC_CALLBACKS_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test908GcStartFinish {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test908GcStartFinish
-} // namespace art
-
-#endif // ART_TEST_908_GC_START_FINISH_GC_CALLBACKS_H_
diff --git a/test/909-attach-agent/expected.txt b/test/909-attach-agent/expected.txt
index eacc595aaf..c0bccd6486 100644
--- a/test/909-attach-agent/expected.txt
+++ b/test/909-attach-agent/expected.txt
@@ -1,3 +1,11 @@
Hello, world!
Attached Agent for test 909-attach-agent
Goodbye!
+Hello, world!
+Attached Agent for test 909-attach-agent
+Goodbye!
+Hello, world!
+java.io.IOException: Process is not debuggable.
+ at dalvik.system.VMDebug.attachAgent(Native Method)
+ at Main.main(Main.java:27)
+Goodbye!
diff --git a/test/909-attach-agent/run b/test/909-attach-agent/run
index aed6e83d67..985341bd4f 100755
--- a/test/909-attach-agent/run
+++ b/test/909-attach-agent/run
@@ -24,4 +24,14 @@ fi
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
--android-runtime-option -Xplugin:${plugin} \
+ --android-runtime-option -Xfully-deoptable \
+ --args agent:${agent}=909-attach-agent
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --android-runtime-option -Xfully-deoptable \
+ --args agent:${agent}=909-attach-agent
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
--args agent:${agent}=909-attach-agent
diff --git a/test/910-methods/methods.cc b/test/910-methods/methods.cc
index fa9679db4b..f60fabb1df 100644
--- a/test/910-methods/methods.cc
+++ b/test/910-methods/methods.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "methods.h"
-
#include <stdio.h>
#include "base/macros.h"
@@ -207,17 +205,5 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isMethodSynthetic(
return is_synthetic;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test910Methods
} // namespace art
diff --git a/test/910-methods/methods.h b/test/910-methods/methods.h
deleted file mode 100644
index 93d18741ed..0000000000
--- a/test/910-methods/methods.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_910_METHODS_METHODS_H_
-#define ART_TEST_910_METHODS_METHODS_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test910Methods {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test910Methods
-} // namespace art
-
-#endif // ART_TEST_910_METHODS_METHODS_H_
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
index f8c97ce475..dad08c9f97 100644
--- a/test/911-get-stack-trace/expected.txt
+++ b/test/911-get-stack-trace/expected.txt
@@ -4,72 +4,72 @@
From top
---------
getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
- print (Ljava/lang/Thread;II)V 0 124
- printOrWait (IILMain$ControlData;)V 6 151
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- doTest ()V 38 34
- main ([Ljava/lang/String;)V 6 24
----------
- print (Ljava/lang/Thread;II)V 0 124
- printOrWait (IILMain$ControlData;)V 6 151
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- doTest ()V 42 35
- main ([Ljava/lang/String;)V 6 24
+ print (Ljava/lang/Thread;II)V 0 34
+ printOrWait (IILControlData;)V 6 39
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ doTest ()V 38 23
+ main ([Ljava/lang/String;)V 6 21
+---------
+ print (Ljava/lang/Thread;II)V 0 34
+ printOrWait (IILControlData;)V 6 39
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ doTest ()V 42 24
+ main ([Ljava/lang/String;)V 6 21
---------
getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
- print (Ljava/lang/Thread;II)V 0 124
- printOrWait (IILMain$ControlData;)V 6 151
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
----------
- printOrWait (IILMain$ControlData;)V 6 151
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ print (Ljava/lang/Thread;II)V 0 34
+ printOrWait (IILControlData;)V 6 39
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+---------
+ printOrWait (IILControlData;)V 6 39
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
From bottom
---------
- main ([Ljava/lang/String;)V 6 24
+ main ([Ljava/lang/String;)V 6 21
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- doTest ()V 65 41
- main ([Ljava/lang/String;)V 6 24
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ doTest ()V 65 30
+ main ([Ljava/lang/String;)V 6 21
---------
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
################################
### Other thread (suspended) ###
@@ -77,132 +77,760 @@ From bottom
From top
---------
wait ()V -1 -2
- printOrWait (IILMain$ControlData;)V 24 157
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 54
----------
- printOrWait (IILMain$ControlData;)V 24 157
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 54
----------
- wait ()V -1 -2
- printOrWait (IILMain$ControlData;)V 24 157
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
----------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 26
+---------
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 26
+---------
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+---------
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
From bottom
---------
- run ()V 4 54
+ run ()V 4 26
---------
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 54
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 26
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
###########################
### Other thread (live) ###
###########################
From top
---------
- printOrWait (IILMain$ControlData;)V 44 164
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 88
----------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 88
----------
- printOrWait (IILMain$ControlData;)V 44 164
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
----------
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
+ printOrWait (IILControlData;)V 44 52
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 59
+---------
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 59
+---------
+ printOrWait (IILControlData;)V 44 52
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+---------
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
From bottom
---------
- run ()V 4 88
+ run ()V 4 59
+---------
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 59
+---------
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+
+################################
+### Other threads (suspended) ###
+################################
+---------
+FinalizerDaemon
+<not printed>
+---------
+FinalizerWatchdogDaemon
+<not printed>
+---------
+HeapTaskDaemon
+<not printed>
+---------
+ReferenceQueueDaemon
+<not printed>
+---------
+Signal Catcher
+
+---------
+Thread-10
+
+---------
+Thread-11
+
+---------
+Thread-12
+
+---------
+Thread-13
+
+---------
+Thread-4
+
+---------
+Thread-5
+
+---------
+Thread-6
+
+---------
+Thread-7
+
+---------
+Thread-8
+
+---------
+Thread-9
+
+---------
+main
+
+---------
+FinalizerDaemon
+<not printed>
+---------
+FinalizerWatchdogDaemon
+<not printed>
---------
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 88
+HeapTaskDaemon
+<not printed>
+---------
+ReferenceQueueDaemon
+<not printed>
+---------
+Signal Catcher
+
+---------
+Thread-10
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-11
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-12
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-13
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-4
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-5
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-6
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-7
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-8
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-9
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+main
+ getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
+ printAll (I)V 0 73
+ doTest ()V 102 57
+ main ([Ljava/lang/String;)V 30 33
+
+---------
+FinalizerDaemon
+<not printed>
+---------
+FinalizerWatchdogDaemon
+<not printed>
+---------
+HeapTaskDaemon
+<not printed>
+---------
+ReferenceQueueDaemon
+<not printed>
+---------
+Signal Catcher
+
+---------
+Thread-10
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
+Thread-11
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-12
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-13
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-4
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-5
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-6
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-7
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-8
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+Thread-9
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 45
+
+---------
+main
+ getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
+ printAll (I)V 0 73
+ doTest ()V 107 59
+ main ([Ljava/lang/String;)V 30 33
+
+
+########################################
+### Other select threads (suspended) ###
+########################################
+---------
+Thread-14
+
+---------
+Thread-16
+
+---------
+Thread-18
+
+---------
+Thread-20
+
+---------
+Thread-22
+
+---------
+main
+
+---------
+Thread-14
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-16
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-18
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-20
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+Thread-22
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+
+---------
+main
+ getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
+ printList ([Ljava/lang/Thread;I)V 0 66
+ doTest ()V 96 52
+ main ([Ljava/lang/String;)V 38 37
+
+---------
+Thread-14
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 35
+
+---------
+Thread-16
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 35
+
+---------
+Thread-18
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 35
+
+---------
+Thread-20
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 35
+
+---------
+Thread-22
+ wait ()V -1 -2
+ printOrWait (IILControlData;)V 24 45
+ baz (IIILControlData;)Ljava/lang/Object; 2 30
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ baz (IIILControlData;)Ljava/lang/Object; 9 32
+ bar (IIILControlData;)J 0 24
+ foo (IIILControlData;)I 0 19
+ run ()V 4 35
+
+---------
+main
+ getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
+ printList ([Ljava/lang/Thread;I)V 0 66
+ doTest ()V 101 54
+ main ([Ljava/lang/String;)V 38 37
+
+
+###################
+### Same thread ###
+###################
+4
+JVMTI_ERROR_ILLEGAL_ARGUMENT
+[public static native java.lang.Object[] Frames.getFrameLocation(java.lang.Thread,int), ffffffff]
+[public static void Frames.doTestSameThread(), 38]
+[public static void Frames.doTest() throws java.lang.Exception, 0]
+[public static void Main.main(java.lang.String[]) throws java.lang.Exception, 2e]
+JVMTI_ERROR_NO_MORE_FRAMES
+
+################################
+### Other thread (suspended) ###
+################################
+18
+JVMTI_ERROR_ILLEGAL_ARGUMENT
+[public final native void java.lang.Object.wait() throws java.lang.InterruptedException, ffffffff]
+[private static void Recurse.printOrWait(int,int,ControlData), 18]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 2]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[public void Frames$1.run(), 4]
+JVMTI_ERROR_NO_MORE_FRAMES
+
+###########################
+### Other thread (live) ###
+###########################
+17
+JVMTI_ERROR_ILLEGAL_ARGUMENT
+[private static void Recurse.printOrWait(int,int,ControlData), 2c]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 2]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[private static java.lang.Object Recurse.baz(int,int,int,ControlData), 9]
+[private static long Recurse.bar(int,int,int,ControlData), 0]
+[public static int Recurse.foo(int,int,int,ControlData), 0]
+[public void Frames$2.run(), 4]
+JVMTI_ERROR_NO_MORE_FRAMES
+Done
diff --git a/test/911-get-stack-trace/src/AllTraces.java b/test/911-get-stack-trace/src/AllTraces.java
new file mode 100644
index 0000000000..adf6f38f5a
--- /dev/null
+++ b/test/911-get-stack-trace/src/AllTraces.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class AllTraces {
+ private final static List<Object> RETAIN = new ArrayList<Object>();
+
+ public static void doTest() throws Exception {
+ System.out.println("################################");
+ System.out.println("### Other threads (suspended) ###");
+ System.out.println("################################");
+
+ // Also create an unstarted and a dead thread.
+ RETAIN.add(new Thread());
+ Thread deadThread = new Thread();
+ RETAIN.add(deadThread);
+ deadThread.start();
+ deadThread.join();
+
+ final int N = 10;
+
+ final ControlData data = new ControlData(N);
+ data.waitFor = new Object();
+
+ Thread threads[] = new Thread[N];
+
+ for (int i = 0; i < N; i++) {
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ threads[i] = t;
+ }
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ printAll(0);
+
+ printAll(5);
+
+ printAll(25);
+
+ // Let the thread make progress and die.
+ synchronized(data.waitFor) {
+ data.waitFor.notifyAll();
+ }
+ for (int i = 0; i < N; i++) {
+ threads[i].join();
+ }
+
+ RETAIN.clear();
+ }
+
+ public static void printAll(int max) {
+ PrintThread.printAll(getAllStackTraces(max));
+ }
+
+ // Get all stack traces. This will return an array with an element for each thread. The element
+ // is an array itself with the first element being the thread, and the second element a nested
+ // String array as in getStackTrace.
+ public static native Object[][] getAllStackTraces(int max);
+}
diff --git a/test/918-fields/fields.h b/test/911-get-stack-trace/src/ControlData.java
index 89bd1614d5..76ac4b8caa 100644
--- a/test/918-fields/fields.h
+++ b/test/911-get-stack-trace/src/ControlData.java
@@ -14,17 +14,18 @@
* limitations under the License.
*/
-#ifndef ART_TEST_918_FIELDS_FIELDS_H_
-#define ART_TEST_918_FIELDS_FIELDS_H_
+import java.util.concurrent.CountDownLatch;
-#include <jni.h>
+public class ControlData {
+ CountDownLatch reached;
+ Object waitFor = null;
+ volatile boolean stop = false;
-namespace art {
-namespace Test918Fields {
+ public ControlData() {
+ this(1);
+ }
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test918Fields
-} // namespace art
-
-#endif // ART_TEST_918_FIELDS_FIELDS_H_
+ public ControlData(int latchCount) {
+ reached = new CountDownLatch(latchCount);
+ }
+}
diff --git a/test/911-get-stack-trace/src/Frames.java b/test/911-get-stack-trace/src/Frames.java
new file mode 100644
index 0000000000..a1a11c3785
--- /dev/null
+++ b/test/911-get-stack-trace/src/Frames.java
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+
+public class Frames {
+ public static void doTest() throws Exception {
+ doTestSameThread();
+
+ System.out.println();
+
+ doTestOtherThreadWait();
+
+ System.out.println();
+
+ doTestOtherThreadBusyLoop();
+ }
+
+ public static void doTestSameThread() {
+ System.out.println("###################");
+ System.out.println("### Same thread ###");
+ System.out.println("###################");
+
+ Thread t = Thread.currentThread();
+
+ int count = getFrameCount(t);
+ System.out.println(count);
+ try {
+ System.out.println(Arrays.toString(getFrameLocation(t, -1)));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+ for (int i = 0; i < count; i++) {
+ System.out.println(Arrays.toString(getFrameLocation(t, i)));
+ }
+ try {
+ System.out.println(Arrays.toString(getFrameLocation(t, count)));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+ }
+
+ public static void doTestOtherThreadWait() throws Exception {
+ System.out.println("################################");
+ System.out.println("### Other thread (suspended) ###");
+ System.out.println("################################");
+ final ControlData data = new ControlData();
+ data.waitFor = new Object();
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ int count = getFrameCount(t);
+ System.out.println(count);
+ try {
+ System.out.println(Arrays.toString(getFrameLocation(t, -1)));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+ for (int i = 0; i < count; i++) {
+ System.out.println(Arrays.toString(getFrameLocation(t, i)));
+ }
+ try {
+ System.out.println(Arrays.toString(getFrameLocation(t, count)));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+
+ // Let the thread make progress and die.
+ synchronized(data.waitFor) {
+ data.waitFor.notifyAll();
+ }
+ t.join();
+ }
+
+ public static void doTestOtherThreadBusyLoop() throws Exception {
+ System.out.println("###########################");
+ System.out.println("### Other thread (live) ###");
+ System.out.println("###########################");
+ final ControlData data = new ControlData();
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ int count = getFrameCount(t);
+ System.out.println(count);
+ try {
+ System.out.println(Arrays.toString(getFrameLocation(t, -1)));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+ for (int i = 0; i < count; i++) {
+ System.out.println(Arrays.toString(getFrameLocation(t, i)));
+ }
+ try {
+ System.out.println(Arrays.toString(getFrameLocation(t, count)));
+ } catch (RuntimeException e) {
+ System.out.println(e.getMessage());
+ }
+
+ // Let the thread stop looping and die.
+ data.stop = true;
+ t.join();
+ }
+
+ public static native int getFrameCount(Thread thread);
+ public static native Object[] getFrameLocation(Thread thread, int depth);
+}
diff --git a/test/911-get-stack-trace/src/Main.java b/test/911-get-stack-trace/src/Main.java
index 722bee8056..b199033c32 100644
--- a/test/911-get-stack-trace/src/Main.java
+++ b/test/911-get-stack-trace/src/Main.java
@@ -14,166 +14,32 @@
* limitations under the License.
*/
-import java.util.Arrays;
-import java.util.concurrent.CountDownLatch;
-
public class Main {
public static void main(String[] args) throws Exception {
System.loadLibrary(args[1]);
- doTest();
- doTestOtherThreadWait();
- doTestOtherThreadBusyLoop();
- }
-
- public static void doTest() throws Exception {
- System.out.println("###################");
- System.out.println("### Same thread ###");
- System.out.println("###################");
- System.out.println("From top");
- Recurse.foo(4, 0, 25, null);
- Recurse.foo(4, 1, 25, null);
- Recurse.foo(4, 0, 5, null);
- Recurse.foo(4, 2, 5, null);
-
- System.out.println("From bottom");
- Recurse.foo(4, -1, 25, null);
- Recurse.foo(4, -5, 5, null);
- Recurse.foo(4, -7, 5, null);
- }
+ SameThread.doTest();
- public static void doTestOtherThreadWait() throws Exception {
System.out.println();
- System.out.println("################################");
- System.out.println("### Other thread (suspended) ###");
- System.out.println("################################");
- final ControlData data = new ControlData();
- data.waitFor = new Object();
- Thread t = new Thread() {
- public void run() {
- Recurse.foo(4, 0, 0, data);
- }
- };
- t.start();
- data.reached.await();
- Thread.yield();
- Thread.sleep(500); // A little bit of time...
- System.out.println("From top");
- print(t, 0, 25);
- print(t, 1, 25);
- print(t, 0, 5);
- print(t, 2, 5);
+ OtherThread.doTestOtherThreadWait();
- System.out.println("From bottom");
- print(t, -1, 25);
- print(t, -5, 5);
- print(t, -7, 5);
-
- // Let the thread make progress and die.
- synchronized(data.waitFor) {
- data.waitFor.notifyAll();
- }
- t.join();
- }
-
- public static void doTestOtherThreadBusyLoop() throws Exception {
System.out.println();
- System.out.println("###########################");
- System.out.println("### Other thread (live) ###");
- System.out.println("###########################");
- final ControlData data = new ControlData();
- Thread t = new Thread() {
- public void run() {
- Recurse.foo(4, 0, 0, data);
- }
- };
- t.start();
- data.reached.await();
- Thread.yield();
- Thread.sleep(500); // A little bit of time...
- System.out.println("From top");
- print(t, 0, 25);
- print(t, 1, 25);
- print(t, 0, 5);
- print(t, 2, 5);
+ OtherThread.doTestOtherThreadBusyLoop();
- System.out.println("From bottom");
- print(t, -1, 25);
- print(t, -5, 5);
- print(t, -7, 5);
-
- // Let the thread stop looping and die.
- data.stop = true;
- t.join();
- }
-
- public static void print(String[][] stack) {
- System.out.println("---------");
- for (String[] stackElement : stack) {
- for (String part : stackElement) {
- System.out.print(' ');
- System.out.print(part);
- }
- System.out.println();
- }
- }
-
- public static void print(Thread t, int start, int max) {
- print(getStackTrace(t, start, max));
- }
+ System.out.println();
- // Wrap generated stack traces into a class to separate them nicely.
- public static class Recurse {
+ AllTraces.doTest();
- public static int foo(int x, int start, int max, ControlData data) {
- bar(x, start, max, data);
- return 0;
- }
+ System.out.println();
- private static long bar(int x, int start, int max, ControlData data) {
- baz(x, start, max, data);
- return 0;
- }
+ ThreadListTraces.doTest();
- private static Object baz(int x, int start, int max, ControlData data) {
- if (x == 0) {
- printOrWait(start, max, data);
- } else {
- foo(x - 1, start, max, data);
- }
- return null;
- }
+ System.out.println();
- private static void printOrWait(int start, int max, ControlData data) {
- if (data == null) {
- print(Thread.currentThread(), start, max);
- } else {
- if (data.waitFor != null) {
- synchronized (data.waitFor) {
- data.reached.countDown();
- try {
- data.waitFor.wait(); // Use wait() as it doesn't have a "hidden" Java call-graph.
- } catch (Throwable t) {
- throw new RuntimeException(t);
- }
- }
- } else {
- data.reached.countDown();
- while (!data.stop) {
- // Busy-loop.
- }
- }
- }
- }
- }
+ Frames.doTest();
- public static class ControlData {
- CountDownLatch reached = new CountDownLatch(1);
- Object waitFor = null;
- volatile boolean stop = false;
+ System.out.println("Done");
}
-
- public static native String[][] getStackTrace(Thread thread, int start, int max);
}
diff --git a/test/911-get-stack-trace/src/OtherThread.java b/test/911-get-stack-trace/src/OtherThread.java
new file mode 100644
index 0000000000..0748433a20
--- /dev/null
+++ b/test/911-get-stack-trace/src/OtherThread.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class OtherThread {
+ public static void doTestOtherThreadWait() throws Exception {
+ System.out.println("################################");
+ System.out.println("### Other thread (suspended) ###");
+ System.out.println("################################");
+ final ControlData data = new ControlData();
+ data.waitFor = new Object();
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ System.out.println("From top");
+ PrintThread.print(t, 0, 25);
+ PrintThread.print(t, 1, 25);
+ PrintThread.print(t, 0, 5);
+ PrintThread.print(t, 2, 5);
+
+ System.out.println("From bottom");
+ PrintThread.print(t, -1, 25);
+ PrintThread.print(t, -5, 5);
+ PrintThread.print(t, -7, 5);
+
+ // Let the thread make progress and die.
+ synchronized(data.waitFor) {
+ data.waitFor.notifyAll();
+ }
+ t.join();
+ }
+
+ public static void doTestOtherThreadBusyLoop() throws Exception {
+ System.out.println("###########################");
+ System.out.println("### Other thread (live) ###");
+ System.out.println("###########################");
+ final ControlData data = new ControlData();
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ System.out.println("From top");
+ PrintThread.print(t, 0, 25);
+ PrintThread.print(t, 1, 25);
+ PrintThread.print(t, 0, 5);
+ PrintThread.print(t, 2, 5);
+
+ System.out.println("From bottom");
+ PrintThread.print(t, -1, 25);
+ PrintThread.print(t, -5, 5);
+ PrintThread.print(t, -7, 5);
+
+ // Let the thread stop looping and die.
+ data.stop = true;
+ t.join();
+ }
+}
diff --git a/test/911-get-stack-trace/src/PrintThread.java b/test/911-get-stack-trace/src/PrintThread.java
new file mode 100644
index 0000000000..97815ccad9
--- /dev/null
+++ b/test/911-get-stack-trace/src/PrintThread.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+public class PrintThread {
+ public static void print(String[][] stack) {
+ System.out.println("---------");
+ for (String[] stackElement : stack) {
+ for (String part : stackElement) {
+ System.out.print(' ');
+ System.out.print(part);
+ }
+ System.out.println();
+ }
+ }
+
+ public static void print(Thread t, int start, int max) {
+ print(getStackTrace(t, start, max));
+ }
+
+ public static void printAll(Object[][] stacks) {
+ List<String> stringified = new ArrayList<String>(stacks.length);
+
+ for (Object[] stackInfo : stacks) {
+ Thread t = (Thread)stackInfo[0];
+ String name = (t != null) ? t.getName() : "null";
+ String stackSerialization;
+ if (name.contains("Daemon")) {
+ // Do not print daemon stacks, as they're non-deterministic.
+ stackSerialization = "<not printed>";
+ } else {
+ StringBuilder sb = new StringBuilder();
+ for (String[] stackElement : (String[][])stackInfo[1]) {
+ for (String part : stackElement) {
+ sb.append(' ');
+ sb.append(part);
+ }
+ sb.append('\n');
+ }
+ stackSerialization = sb.toString();
+ }
+ stringified.add(name + "\n" + stackSerialization);
+ }
+
+ Collections.sort(stringified);
+
+ for (String s : stringified) {
+ System.out.println("---------");
+ System.out.println(s);
+ }
+ }
+
+ public static native String[][] getStackTrace(Thread thread, int start, int max);
+}
\ No newline at end of file
diff --git a/test/911-get-stack-trace/src/Recurse.java b/test/911-get-stack-trace/src/Recurse.java
new file mode 100644
index 0000000000..439fbaaf84
--- /dev/null
+++ b/test/911-get-stack-trace/src/Recurse.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Recurse {
+ public static int foo(int x, int start, int max, ControlData data) {
+ bar(x, start, max, data);
+ return 0;
+ }
+
+ private static long bar(int x, int start, int max, ControlData data) {
+ baz(x, start, max, data);
+ return 0;
+ }
+
+ private static Object baz(int x, int start, int max, ControlData data) {
+ if (x == 0) {
+ printOrWait(start, max, data);
+ } else {
+ foo(x - 1, start, max, data);
+ }
+ return null;
+ }
+
+ private static void printOrWait(int start, int max, ControlData data) {
+ if (data == null) {
+ PrintThread.print(Thread.currentThread(), start, max);
+ } else {
+ if (data.waitFor != null) {
+ synchronized (data.waitFor) {
+ data.reached.countDown();
+ try {
+ data.waitFor.wait(); // Use wait() as it doesn't have a "hidden" Java call-graph.
+ } catch (Throwable t) {
+ throw new RuntimeException(t);
+ }
+ }
+ } else {
+ data.reached.countDown();
+ while (!data.stop) {
+ // Busy-loop.
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/test/904-object-allocation/tracking.h b/test/911-get-stack-trace/src/SameThread.java
index 21c1837523..f1e19e367a 100644
--- a/test/904-object-allocation/tracking.h
+++ b/test/911-get-stack-trace/src/SameThread.java
@@ -14,17 +14,20 @@
* limitations under the License.
*/
-#ifndef ART_TEST_904_OBJECT_ALLOCATION_TRACKING_H_
-#define ART_TEST_904_OBJECT_ALLOCATION_TRACKING_H_
+public class SameThread {
+ public static void doTest() throws Exception {
+ System.out.println("###################");
+ System.out.println("### Same thread ###");
+ System.out.println("###################");
+ System.out.println("From top");
+ Recurse.foo(4, 0, 25, null);
+ Recurse.foo(4, 1, 25, null);
+ Recurse.foo(4, 0, 5, null);
+ Recurse.foo(4, 2, 5, null);
-#include <jni.h>
-
-namespace art {
-namespace Test904ObjectAllocation {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test904ObjectAllocation
-} // namespace art
-
-#endif // ART_TEST_904_OBJECT_ALLOCATION_TRACKING_H_
+ System.out.println("From bottom");
+ Recurse.foo(4, -1, 25, null);
+ Recurse.foo(4, -5, 5, null);
+ Recurse.foo(4, -7, 5, null);
+ }
+}
diff --git a/test/911-get-stack-trace/src/ThreadListTraces.java b/test/911-get-stack-trace/src/ThreadListTraces.java
new file mode 100644
index 0000000000..f66557f3bd
--- /dev/null
+++ b/test/911-get-stack-trace/src/ThreadListTraces.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class ThreadListTraces {
+ public static void doTest() throws Exception {
+ System.out.println("########################################");
+ System.out.println("### Other select threads (suspended) ###");
+ System.out.println("########################################");
+
+ final int N = 10;
+
+ final ControlData data = new ControlData(N);
+ data.waitFor = new Object();
+
+ Thread threads[] = new Thread[N];
+
+ Thread list[] = new Thread[N/2 + 1];
+
+ for (int i = 0; i < N; i++) {
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ threads[i] = t;
+ if (i % 2 == 0) {
+ list[i/2] = t;
+ }
+ }
+ list[list.length - 1] = Thread.currentThread();
+
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ printList(list, 0);
+
+ printList(list, 5);
+
+ printList(list, 25);
+
+ // Let the thread make progress and die.
+ synchronized(data.waitFor) {
+ data.waitFor.notifyAll();
+ }
+ for (int i = 0; i < N; i++) {
+ threads[i].join();
+ }
+ }
+
+ public static void printList(Thread[] threads, int max) {
+ PrintThread.printAll(getThreadListStackTraces(threads, max));
+ }
+
+ // Similar to getAllStackTraces, but restricted to the given threads.
+ public static native Object[][] getThreadListStackTraces(Thread threads[], int max);
+}
diff --git a/test/911-get-stack-trace/stack_trace.cc b/test/911-get-stack-trace/stack_trace.cc
index b3e8bc3b1f..d162e8a169 100644
--- a/test/911-get-stack-trace/stack_trace.cc
+++ b/test/911-get-stack-trace/stack_trace.cc
@@ -14,14 +14,13 @@
* limitations under the License.
*/
-#include "stack_trace.h"
-
#include <inttypes.h>
#include <memory>
#include <stdio.h>
#include "android-base/stringprintf.h"
+#include "android-base/stringprintf.h"
#include "base/logging.h"
#include "base/macros.h"
#include "jni.h"
@@ -52,33 +51,16 @@ static jint FindLineNumber(jint line_number_count,
return line_number;
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace(
- JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread, jint start, jint max) {
- std::unique_ptr<jvmtiFrameInfo[]> frames(new jvmtiFrameInfo[max]);
-
- jint count;
- {
- jvmtiError result = jvmti_env->GetStackTrace(thread, start, max, frames.get(), &count);
- if (result != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(result, &err);
- printf("Failure running GetStackTrace: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
- return nullptr;
- }
- }
-
+static jobjectArray TranslateJvmtiFrameInfoArray(JNIEnv* env,
+ jvmtiFrameInfo* frames,
+ jint count) {
auto callback = [&](jint method_index) -> jobjectArray {
char* name;
char* sig;
char* gen;
{
jvmtiError result2 = jvmti_env->GetMethodName(frames[method_index].method, &name, &sig, &gen);
- if (result2 != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(result2, &err);
- printf("Failure running GetMethodName: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ if (JvmtiErrorToException(env, result2)) {
return nullptr;
}
}
@@ -142,16 +124,133 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace(
return CreateObjectArray(env, count, "[Ljava/lang/String;", callback);
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
+extern "C" JNIEXPORT jobjectArray JNICALL Java_PrintThread_getStackTrace(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread, jint start, jint max) {
+ std::unique_ptr<jvmtiFrameInfo[]> frames(new jvmtiFrameInfo[max]);
+
+ jint count;
+ {
+ jvmtiError result = jvmti_env->GetStackTrace(thread, start, max, frames.get(), &count);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+ }
+
+ return TranslateJvmtiFrameInfoArray(env, frames.get(), count);
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_AllTraces_getAllStackTraces(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jint max) {
+ jint thread_count;
+ jvmtiStackInfo* stack_infos;
+ {
+ jvmtiError result = jvmti_env->GetAllStackTraces(max, &stack_infos, &thread_count);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
}
- SetAllCapabilities(jvmti_env);
- return 0;
+
+ auto callback = [&](jint thread_index) -> jobject {
+ auto inner_callback = [&](jint index) -> jobject {
+ if (index == 0) {
+ return stack_infos[thread_index].thread;
+ } else {
+ return TranslateJvmtiFrameInfoArray(env,
+ stack_infos[thread_index].frame_buffer,
+ stack_infos[thread_index].frame_count);
+ }
+ };
+ return CreateObjectArray(env, 2, "java/lang/Object", inner_callback);
+ };
+ jobjectArray ret = CreateObjectArray(env, thread_count, "[Ljava/lang/Object;", callback);
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(stack_infos));
+ return ret;
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_ThreadListTraces_getThreadListStackTraces(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobjectArray jthreads, jint max) {
+ jint thread_count = env->GetArrayLength(jthreads);
+ std::unique_ptr<jthread[]> threads(new jthread[thread_count]);
+ for (jint i = 0; i != thread_count; ++i) {
+ threads[i] = env->GetObjectArrayElement(jthreads, i);
+ }
+
+ jvmtiStackInfo* stack_infos;
+ {
+ jvmtiError result = jvmti_env->GetThreadListStackTraces(thread_count,
+ threads.get(),
+ max,
+ &stack_infos);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+ }
+
+ auto callback = [&](jint thread_index) -> jobject {
+ auto inner_callback = [&](jint index) -> jobject {
+ if (index == 0) {
+ return stack_infos[thread_index].thread;
+ } else {
+ return TranslateJvmtiFrameInfoArray(env,
+ stack_infos[thread_index].frame_buffer,
+ stack_infos[thread_index].frame_count);
+ }
+ };
+ return CreateObjectArray(env, 2, "java/lang/Object", inner_callback);
+ };
+ jobjectArray ret = CreateObjectArray(env, thread_count, "[Ljava/lang/Object;", callback);
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(stack_infos));
+ return ret;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Frames_getFrameCount(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread) {
+ jint count;
+ jvmtiError result = jvmti_env->GetFrameCount(thread, &count);
+ if (JvmtiErrorToException(env, result)) {
+ return -1;
+ }
+ return count;
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Frames_getFrameLocation(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread, jint depth) {
+ jmethodID method;
+ jlocation location;
+
+ jvmtiError result = jvmti_env->GetFrameLocation(thread, depth, &method, &location);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+
+ auto callback = [&](jint index) -> jobject {
+ switch (index) {
+ case 0:
+ {
+ jclass decl_class;
+ jvmtiError class_result = jvmti_env->GetMethodDeclaringClass(method, &decl_class);
+ if (JvmtiErrorToException(env, class_result)) {
+ return nullptr;
+ }
+ jint modifiers;
+ jvmtiError mod_result = jvmti_env->GetMethodModifiers(method, &modifiers);
+ if (JvmtiErrorToException(env, mod_result)) {
+ return nullptr;
+ }
+ constexpr jint kStatic = 0x8;
+ return env->ToReflectedMethod(decl_class,
+ method,
+ (modifiers & kStatic) != 0 ? JNI_TRUE : JNI_FALSE);
+ }
+ case 1:
+ return env->NewStringUTF(
+ android::base::StringPrintf("%x", static_cast<uint32_t>(location)).c_str());
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ };
+ jobjectArray ret = CreateObjectArray(env, 2, "java/lang/Object", callback);
+ return ret;
}
} // namespace Test911GetStackTrace
diff --git a/test/911-get-stack-trace/stack_trace.h b/test/911-get-stack-trace/stack_trace.h
deleted file mode 100644
index eba2a91da1..0000000000
--- a/test/911-get-stack-trace/stack_trace.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_911_GET_STACK_TRACE_STACK_TRACE_H_
-#define ART_TEST_911_GET_STACK_TRACE_STACK_TRACE_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test911GetStackTrace {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test911GetStackTrace
-} // namespace art
-
-#endif // ART_TEST_911_GET_STACK_TRACE_STACK_TRACE_H_
diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc
index 38a4f0e337..69301c7925 100644
--- a/test/912-classes/classes.cc
+++ b/test/912-classes/classes.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "classes.h"
-
#include <stdio.h>
#include "base/macros.h"
@@ -224,17 +222,5 @@ extern "C" JNIEXPORT jobject JNICALL Java_Main_getClassLoader(
return classloader;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test912Classes
} // namespace art
diff --git a/test/912-classes/classes.h b/test/912-classes/classes.h
deleted file mode 100644
index 62fb203356..0000000000
--- a/test/912-classes/classes.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_912_CLASSES_CLASSES_H_
-#define ART_TEST_912_CLASSES_CLASSES_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test912Classes {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test912Classes
-} // namespace art
-
-#endif // ART_TEST_912_CLASSES_CLASSES_H_
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index 0b232af0df..67599192cf 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "heaps.h"
-
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
@@ -495,17 +493,5 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_followReferences(JNIEnv* env
return ret;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test913Heaps
} // namespace art
diff --git a/test/918-fields/fields.cc b/test/918-fields/fields.cc
index 4d2b34b94e..7d29912f47 100644
--- a/test/918-fields/fields.cc
+++ b/test/918-fields/fields.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "fields.h"
-
#include <stdio.h>
#include "base/macros.h"
@@ -132,17 +130,5 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isFieldSynthetic(
return synth;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test918Fields
} // namespace art
diff --git a/test/920-objects/objects.cc b/test/920-objects/objects.cc
index 886dd0e673..0553a9d007 100644
--- a/test/920-objects/objects.cc
+++ b/test/920-objects/objects.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "objects.h"
-
#include <stdio.h>
#include "base/macros.h"
@@ -61,17 +59,5 @@ extern "C" JNIEXPORT jint JNICALL Java_Main_getObjectHashCode(
return hash;
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test920Objects
} // namespace art
diff --git a/test/920-objects/objects.h b/test/920-objects/objects.h
deleted file mode 100644
index 5f21e7b7cb..0000000000
--- a/test/920-objects/objects.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_TEST_920_OBJECTS_OBJECTS_H_
-#define ART_TEST_920_OBJECTS_OBJECTS_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test920Objects {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test920Objects
-} // namespace art
-
-#endif // ART_TEST_920_OBJECTS_OBJECTS_H_
diff --git a/test/921-hello-failure/expected.txt b/test/921-hello-failure/expected.txt
index e2665ef30b..1c1d4d9b80 100644
--- a/test/921-hello-failure/expected.txt
+++ b/test/921-hello-failure/expected.txt
@@ -13,3 +13,11 @@ hello2 - MissingInterface
hello2 - ReorderInterface
Transformation error : java.lang.Exception(Failed to redefine class <LTransform2;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED)
hello2 - ReorderInterface
+hello - MultiRedef
+hello2 - MultiRedef
+Transformation error : java.lang.Exception(Failed to redefine classes <LTransform2;, LTransform;> due to JVMTI_ERROR_NAMES_DONT_MATCH)
+hello - MultiRedef
+hello2 - MultiRedef
+Transformation error : java.lang.Exception(Failed to redefine classes <LTransform;, LTransform2;> due to JVMTI_ERROR_NAMES_DONT_MATCH)
+hello - MultiRedef
+hello2 - MultiRedef
diff --git a/test/922-properties/properties.h b/test/921-hello-failure/src/CommonClassDefinition.java
index 84feb10758..62602a02e9 100644
--- a/test/922-properties/properties.h
+++ b/test/921-hello-failure/src/CommonClassDefinition.java
@@ -14,17 +14,14 @@
* limitations under the License.
*/
-#ifndef ART_TEST_922_PROPERTIES_PROPERTIES_H_
-#define ART_TEST_922_PROPERTIES_PROPERTIES_H_
+class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
-#include <jni.h>
-
-namespace art {
-namespace Test922Properties {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test922Properties
-} // namespace art
-
-#endif // ART_TEST_922_PROPERTIES_PROPERTIES_H_
+ CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+}
diff --git a/test/921-hello-failure/src/Main.java b/test/921-hello-failure/src/Main.java
index 69c48e26cc..1fe259961d 100644
--- a/test/921-hello-failure/src/Main.java
+++ b/test/921-hello-failure/src/Main.java
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+import java.util.ArrayList;
public class Main {
public static void main(String[] args) {
@@ -23,10 +24,30 @@ public class Main {
NewInterface.doTest(new Transform2());
MissingInterface.doTest(new Transform2());
ReorderInterface.doTest(new Transform2());
+ MultiRedef.doTest(new Transform(), new Transform2());
}
// Transforms the class. This throws an exception if something goes wrong.
public static native void doCommonClassRedefinition(Class<?> target,
byte[] classfile,
byte[] dexfile) throws Exception;
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) throws Exception {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles) throws Exception;
}
diff --git a/test/921-hello-failure/src/MultiRedef.java b/test/921-hello-failure/src/MultiRedef.java
new file mode 100644
index 0000000000..c64342c8f8
--- /dev/null
+++ b/test/921-hello-failure/src/MultiRedef.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class MultiRedef {
+
+ // class NotTransform {
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static CommonClassDefinition INVALID_DEFINITION_T1 = new CommonClassDefinition(
+ Transform.class,
+ Base64.getDecoder().decode(
+ "yv66vgAAADQAFQoABgAPBwAQCAARCgACABIHABMHABQBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+ "TGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3VyY2VG" +
+ "aWxlAQARTm90VHJhbnNmb3JtLmphdmEMAAcACAEAD2phdmEvbGFuZy9FcnJvcgEAFVNob3VsZCBu" +
+ "b3QgYmUgY2FsbGVkIQwABwAMAQAMTm90VHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAAgAAUA" +
+ "BgAAAAAAAgAAAAcACAABAAkAAAAdAAEAAQAAAAUqtwABsQAAAAEACgAAAAYAAQAAAAEAAQALAAwA" +
+ "AQAJAAAAIgADAAIAAAAKuwACWRIDtwAEvwAAAAEACgAAAAYAAQAAAAMAAQANAAAAAgAO"),
+ Base64.getDecoder().decode(
+ "ZGV4CjAzNQDLV95i5xnv6iUi6uIeDoY5jP5Xe9NP1AiYAgAAcAAAAHhWNBIAAAAAAAAAAAQCAAAL" +
+ "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACQAQAACAEAAEoB" +
+ "AABSAQAAYgEAAHUBAACJAQAAnQEAALABAADHAQAAygEAAM4BAADiAQAAAQAAAAIAAAADAAAABAAA" +
+ "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAAAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+ "AAAAAAAAAAAAAAAAAgAAAAAAAAAFAAAAAAAAAPQBAAAAAAAAAQABAAEAAADpAQAABAAAAHAQAwAA" +
+ "AA4ABAACAAIAAADuAQAACQAAACIAAQAbAQYAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgAOTE5v" +
+ "dFRyYW5zZm9ybTsAEUxqYXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZh" +
+ "L2xhbmcvU3RyaW5nOwARTm90VHJhbnNmb3JtLmphdmEAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAB" +
+ "VgACVkwAEmVtaXR0ZXI6IGphY2stNC4yMAAFc2F5SGkAAQAHDgADAQAHDgAAAAEBAICABIgCAQGg" +
+ "AgAADAAAAAAAAAABAAAAAAAAAAEAAAALAAAAcAAAAAIAAAAFAAAAnAAAAAMAAAACAAAAsAAAAAUA" +
+ "AAAEAAAAyAAAAAYAAAABAAAA6AAAAAEgAAACAAAACAEAAAEQAAABAAAARAEAAAIgAAALAAAASgEA" +
+ "AAMgAAACAAAA6QEAAAAgAAABAAAA9AEAAAAQAAABAAAABAIAAA=="));
+
+ // Valid redefinition of Transform2
+ // class Transform2 implements Iface1, Iface2 {
+ // public void sayHi(String name) {
+ // throw new Error("Should not be called!");
+ // }
+ // }
+ private static CommonClassDefinition VALID_DEFINITION_T2 = new CommonClassDefinition(
+ Transform2.class,
+ Base64.getDecoder().decode(
+ "yv66vgAAADQAGQoABgARBwASCAATCgACABQHABUHABYHABcHABgBAAY8aW5pdD4BAAMoKVYBAARD" +
+ "b2RlAQAPTGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApT" +
+ "b3VyY2VGaWxlAQAPVHJhbnNmb3JtMi5qYXZhDAAJAAoBAA9qYXZhL2xhbmcvRXJyb3IBABVTaG91" +
+ "bGQgbm90IGJlIGNhbGxlZCEMAAkADgEAClRyYW5zZm9ybTIBABBqYXZhL2xhbmcvT2JqZWN0AQAG" +
+ "SWZhY2UxAQAGSWZhY2UyACAABQAGAAIABwAIAAAAAgAAAAkACgABAAsAAAAdAAEAAQAAAAUqtwAB" +
+ "sQAAAAEADAAAAAYAAQAAAAEAAQANAA4AAQALAAAAIgADAAIAAAAKuwACWRIDtwAEvwAAAAEADAAA" +
+ "AAYAAQAAAAMAAQAPAAAAAgAQ"),
+ Base64.getDecoder().decode(
+ "ZGV4CjAzNQDSWls05CPkX+gbTGMVRvx9dc9vozzVbu7AAgAAcAAAAHhWNBIAAAAAAAAAACwCAAAN" +
+ "AAAAcAAAAAcAAACkAAAAAgAAAMAAAAAAAAAAAAAAAAQAAADYAAAAAQAAAPgAAACoAQAAGAEAAGIB" +
+ "AABqAQAAdAEAAH4BAACMAQAAnwEAALMBAADHAQAA3gEAAO8BAADyAQAA9gEAAAoCAAABAAAAAgAA" +
+ "AAMAAAAEAAAABQAAAAYAAAAJAAAACQAAAAYAAAAAAAAACgAAAAYAAABcAQAAAgAAAAAAAAACAAEA" +
+ "DAAAAAMAAQAAAAAABAAAAAAAAAACAAAAAAAAAAQAAABUAQAACAAAAAAAAAAcAgAAAAAAAAEAAQAB" +
+ "AAAAEQIAAAQAAABwEAMAAAAOAAQAAgACAAAAFgIAAAkAAAAiAAMAGwEHAAAAcCACABAAJwAAAAIA" +
+ "AAAAAAEAAQAAAAUABjxpbml0PgAITElmYWNlMTsACExJZmFjZTI7AAxMVHJhbnNmb3JtMjsAEUxq" +
+ "YXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwAV" +
+ "U2hvdWxkIG5vdCBiZSBjYWxsZWQhAA9UcmFuc2Zvcm0yLmphdmEAAVYAAlZMABJlbWl0dGVyOiBq" +
+ "YWNrLTQuMjAABXNheUhpAAEABw4AAwEABw4AAAABAQCAgASYAgEBsAIAAAwAAAAAAAAAAQAAAAAA" +
+ "AAABAAAADQAAAHAAAAACAAAABwAAAKQAAAADAAAAAgAAAMAAAAAFAAAABAAAANgAAAAGAAAAAQAA" +
+ "APgAAAABIAAAAgAAABgBAAABEAAAAgAAAFQBAAACIAAADQAAAGIBAAADIAAAAgAAABECAAAAIAAA" +
+ "AQAAABwCAAAAEAAAAQAAACwCAAA="));
+
+ public static void doTest(Transform t1, Transform2 t2) {
+ t1.sayHi("MultiRedef");
+ t2.sayHi("MultiRedef");
+ try {
+ Main.doMultiClassRedefinition(VALID_DEFINITION_T2, INVALID_DEFINITION_T1);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t1.sayHi("MultiRedef");
+ t2.sayHi("MultiRedef");
+ try {
+ Main.doMultiClassRedefinition(INVALID_DEFINITION_T1, VALID_DEFINITION_T2);
+ } catch (Exception e) {
+ System.out.println(
+ "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+ }
+ t1.sayHi("MultiRedef");
+ t2.sayHi("MultiRedef");
+ }
+}
diff --git a/test/922-properties/properties.cc b/test/922-properties/properties.cc
index b1e7fce3b5..cb732c74f1 100644
--- a/test/922-properties/properties.cc
+++ b/test/922-properties/properties.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "properties.h"
-
#include <stdio.h>
#include "base/macros.h"
@@ -91,17 +89,5 @@ extern "C" JNIEXPORT void JNICALL Java_Main_setSystemProperty(
}
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test922Properties
} // namespace art
diff --git a/test/923-monitors/monitors.cc b/test/923-monitors/monitors.cc
index 2aa36cbdba..4baa530ec2 100644
--- a/test/923-monitors/monitors.cc
+++ b/test/923-monitors/monitors.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "monitors.h"
-
#include <stdio.h>
#include "base/macros.h"
@@ -84,17 +82,5 @@ extern "C" JNIEXPORT void JNICALL Java_Main_rawMonitorNotifyAll(
JvmtiErrorToException(env, result);
}
-// Don't do anything
-jint OnLoad(JavaVM* vm,
- char* options ATTRIBUTE_UNUSED,
- void* reserved ATTRIBUTE_UNUSED) {
- if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
- printf("Unable to get jvmti env!\n");
- return 1;
- }
- SetAllCapabilities(jvmti_env);
- return 0;
-}
-
} // namespace Test923Monitors
} // namespace art
diff --git a/test/924-threads/build b/test/924-threads/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/924-threads/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/924-threads/expected.txt b/test/924-threads/expected.txt
new file mode 100644
index 0000000000..32e3368d02
--- /dev/null
+++ b/test/924-threads/expected.txt
@@ -0,0 +1,31 @@
+currentThread OK
+main
+5
+false
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
+main
+5
+false
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
+Daemon Thread
+5
+true
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
+Daemon Thread
+5
+true
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
+5
+5
+0 = NEW
+191 = ALIVE|WAITING_INDEFINITELY|WAITING|IN_OBJECT_WAIT
+1a1 = ALIVE|WAITING_WITH_TIMEOUT|WAITING|IN_OBJECT_WAIT
+401 = ALIVE|BLOCKED_ON_MONITOR_ENTER
+e1 = ALIVE|WAITING_WITH_TIMEOUT|SLEEPING|WAITING
+5 = ALIVE|RUNNABLE
+2 = TERMINATED
+[Thread[FinalizerDaemon,5,system], Thread[FinalizerWatchdogDaemon,5,system], Thread[HeapTaskDaemon,5,system], Thread[ReferenceQueueDaemon,5,system], Thread[Signal Catcher,5,system], Thread[main,5,main]]
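The hex values above are JVMTI thread-state bit masks (for example 0x191 = 0x100 | 0x80 | 0x10 | 0x1). A minimal standalone sketch of the decoding, assuming the same flag values that the STATE_NAMES map uses in src/Main.java below; the class name is only illustrative:

import java.util.LinkedHashMap;
import java.util.Map;

public class DecodeThreadState {
  public static void main(String[] args) {
    // Flag values mirror the STATE_NAMES map in test/924-threads/src/Main.java.
    Map<Integer, String> names = new LinkedHashMap<>();
    names.put(0x1, "ALIVE");
    names.put(0x10, "WAITING_INDEFINITELY");
    names.put(0x20, "WAITING_WITH_TIMEOUT");
    names.put(0x40, "SLEEPING");
    names.put(0x80, "WAITING");
    names.put(0x100, "IN_OBJECT_WAIT");
    names.put(0x400, "BLOCKED_ON_MONITOR_ENTER");

    int state = 0x191;  // The value printed for the indefinite Object.wait() case above.
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<Integer, String> e : names.entrySet()) {
      if ((state & e.getKey()) != 0) {
        if (sb.length() > 0) {
          sb.append('|');
        }
        sb.append(e.getValue());
      }
    }
    // Prints: 191 = ALIVE|WAITING_INDEFINITELY|WAITING|IN_OBJECT_WAIT
    System.out.println(Integer.toHexString(state) + " = " + sb);
  }
}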
diff --git a/test/924-threads/info.txt b/test/924-threads/info.txt
new file mode 100644
index 0000000000..875a5f6ec1
--- /dev/null
+++ b/test/924-threads/info.txt
@@ -0,0 +1 @@
+Tests the basic thread query functions (GetCurrentThread, GetThreadInfo, GetThreadState,
+GetAllThreads) in the jvmti plugin.
diff --git a/test/924-threads/run b/test/924-threads/run
new file mode 100755
index 0000000000..4379349cb2
--- /dev/null
+++ b/test/924-threads/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --jvmti
diff --git a/test/924-threads/src/Main.java b/test/924-threads/src/Main.java
new file mode 100644
index 0000000000..492a7ac6d1
--- /dev/null
+++ b/test/924-threads/src/Main.java
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.concurrent.CountDownLatch;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[1]);
+
+ doTest();
+ }
+
+ private static void doTest() throws Exception {
+ Thread t1 = Thread.currentThread();
+ Thread t2 = getCurrentThread();
+
+ if (t1 != t2) {
+ throw new RuntimeException("Expected " + t1 + " but got " + t2);
+ }
+ System.out.println("currentThread OK");
+
+ printThreadInfo(t1);
+ printThreadInfo(null);
+
+ Thread t3 = new Thread("Daemon Thread");
+ t3.setDaemon(true);
+    // Do not start this thread yet.
+ printThreadInfo(t3);
+ // Start, and wait for it to die.
+ t3.start();
+ t3.join();
+ Thread.sleep(500); // Wait a little bit.
+ // Thread has died, check that we can still get info.
+ printThreadInfo(t3);
+
+ doStateTests();
+
+ doAllThreadsTests();
+ }
+
+ private static class Holder {
+ volatile boolean flag = false;
+ }
+
+ private static void doStateTests() throws Exception {
+ System.out.println(Integer.toHexString(getThreadState(null)));
+ System.out.println(Integer.toHexString(getThreadState(Thread.currentThread())));
+
+ final CountDownLatch cdl1 = new CountDownLatch(1);
+ final CountDownLatch cdl2 = new CountDownLatch(1);
+ final CountDownLatch cdl3_1 = new CountDownLatch(1);
+ final CountDownLatch cdl3_2 = new CountDownLatch(1);
+ final CountDownLatch cdl4 = new CountDownLatch(1);
+ final CountDownLatch cdl5 = new CountDownLatch(1);
+ final Holder h = new Holder();
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ cdl1.countDown();
+ synchronized(cdl1) {
+ cdl1.wait();
+ }
+
+ cdl2.countDown();
+ synchronized(cdl2) {
+ cdl2.wait(1000); // Wait a second.
+ }
+
+ cdl3_1.await();
+ cdl3_2.countDown();
+ synchronized(cdl3_2) {
+          // Nothing; we just wanted to block on cdl3_2's monitor.
+ }
+
+ cdl4.countDown();
+ Thread.sleep(1000);
+
+ cdl5.countDown();
+ while (!h.flag) {
+ // Busy-loop.
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+
+ Thread t = new Thread(r);
+ printThreadState(t);
+ t.start();
+
+ // Waiting.
+ cdl1.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ synchronized(cdl1) {
+ cdl1.notifyAll();
+ }
+
+ // Timed waiting.
+ cdl2.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ synchronized(cdl2) {
+ cdl2.notifyAll();
+ }
+
+ // Blocked on monitor.
+ synchronized(cdl3_2) {
+ cdl3_1.countDown();
+ cdl3_2.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ }
+
+ // Sleeping.
+ cdl4.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+
+ // Running.
+ cdl5.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ h.flag = true;
+
+ // Dying.
+ t.join();
+ Thread.yield();
+ Thread.sleep(100);
+
+ printThreadState(t);
+ }
+
+ private static void doAllThreadsTests() {
+ Thread[] threads = getAllThreads();
+ Arrays.sort(threads, THREAD_COMP);
+ System.out.println(Arrays.toString(threads));
+ }
+
+ private final static Comparator<Thread> THREAD_COMP = new Comparator<Thread>() {
+ public int compare(Thread o1, Thread o2) {
+ return o1.getName().compareTo(o2.getName());
+ }
+ };
+
+ private final static Map<Integer, String> STATE_NAMES = new HashMap<Integer, String>();
+ private final static List<Integer> STATE_KEYS = new ArrayList<Integer>();
+ static {
+ STATE_NAMES.put(0x1, "ALIVE");
+ STATE_NAMES.put(0x2, "TERMINATED");
+ STATE_NAMES.put(0x4, "RUNNABLE");
+ STATE_NAMES.put(0x400, "BLOCKED_ON_MONITOR_ENTER");
+ STATE_NAMES.put(0x80, "WAITING");
+ STATE_NAMES.put(0x10, "WAITING_INDEFINITELY");
+ STATE_NAMES.put(0x20, "WAITING_WITH_TIMEOUT");
+ STATE_NAMES.put(0x40, "SLEEPING");
+ STATE_NAMES.put(0x100, "IN_OBJECT_WAIT");
+ STATE_NAMES.put(0x200, "PARKED");
+ STATE_NAMES.put(0x100000, "SUSPENDED");
+ STATE_NAMES.put(0x200000, "INTERRUPTED");
+ STATE_NAMES.put(0x400000, "IN_NATIVE");
+ STATE_KEYS.addAll(STATE_NAMES.keySet());
+ Collections.sort(STATE_KEYS);
+ }
+
+ private static void printThreadState(Thread t) {
+ int state = getThreadState(t);
+
+ StringBuilder sb = new StringBuilder();
+
+ for (Integer i : STATE_KEYS) {
+ if ((state & i) != 0) {
+        if (sb.length() > 0) {
+ sb.append('|');
+ }
+ sb.append(STATE_NAMES.get(i));
+ }
+ }
+
+ if (sb.length() == 0) {
+ sb.append("NEW");
+ }
+
+ System.out.println(Integer.toHexString(state) + " = " + sb.toString());
+ }
+
+ private static void printThreadInfo(Thread t) {
+ Object[] threadInfo = getThreadInfo(t);
+ if (threadInfo == null || threadInfo.length != 5) {
+ System.out.println(Arrays.toString(threadInfo));
+ throw new RuntimeException("threadInfo length wrong");
+ }
+
+ System.out.println(threadInfo[0]); // Name
+ System.out.println(threadInfo[1]); // Priority
+ System.out.println(threadInfo[2]); // Daemon
+ System.out.println(threadInfo[3]); // Threadgroup
+ System.out.println(threadInfo[4] == null ? "null" : threadInfo[4].getClass()); // Context CL.
+ }
+
+ private static native Thread getCurrentThread();
+ private static native Object[] getThreadInfo(Thread t);
+ private static native int getThreadState(Thread t);
+ private static native Thread[] getAllThreads();
+}
diff --git a/test/924-threads/threads.cc b/test/924-threads/threads.cc
new file mode 100644
index 0000000000..1487b7c64d
--- /dev/null
+++ b/test/924-threads/threads.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include "android-base/stringprintf.h"
+#include "base/macros.h"
+#include "base/logging.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test924Threads {
+
+// private static native Thread getCurrentThread();
+// private static native Object[] getThreadInfo(Thread t);
+
+extern "C" JNIEXPORT jthread JNICALL Java_Main_getCurrentThread(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
+ jthread thread = nullptr;
+ jvmtiError result = jvmti_env->GetCurrentThread(&thread);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+ return thread;
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getThreadInfo(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jthread thread) {
+ jvmtiThreadInfo info;
+ memset(&info, 0, sizeof(jvmtiThreadInfo));
+
+ jvmtiError result = jvmti_env->GetThreadInfo(thread, &info);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+
+ auto callback = [&](jint component_index) -> jobject {
+ switch (component_index) {
+ // The name.
+ case 0:
+ return (info.name == nullptr) ? nullptr : env->NewStringUTF(info.name);
+
+ // The priority. Use a string for simplicity of construction.
+ case 1:
+ return env->NewStringUTF(android::base::StringPrintf("%d", info.priority).c_str());
+
+ // Whether it's a daemon. Use a string for simplicity of construction.
+ case 2:
+ return env->NewStringUTF(info.is_daemon == JNI_TRUE ? "true" : "false");
+
+      // The thread group.
+ case 3:
+ return env->NewLocalRef(info.thread_group);
+
+ // The context classloader.
+ case 4:
+ return env->NewLocalRef(info.context_class_loader);
+ }
+ LOG(FATAL) << "Should not reach here";
+ UNREACHABLE();
+ };
+ jobjectArray ret = CreateObjectArray(env, 5, "java/lang/Object", callback);
+
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(info.name));
+ if (info.thread_group != nullptr) {
+ env->DeleteLocalRef(info.thread_group);
+ }
+ if (info.context_class_loader != nullptr) {
+ env->DeleteLocalRef(info.context_class_loader);
+ }
+
+ return ret;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_getThreadState(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jthread thread) {
+ jint state;
+ jvmtiError result = jvmti_env->GetThreadState(thread, &state);
+ if (JvmtiErrorToException(env, result)) {
+ return 0;
+ }
+ return state;
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getAllThreads(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
+ jint thread_count;
+ jthread* threads;
+
+ jvmtiError result = jvmti_env->GetAllThreads(&thread_count, &threads);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+
+ auto callback = [&](jint index) {
+ return threads[index];
+ };
+ jobjectArray ret = CreateObjectArray(env, thread_count, "java/lang/Thread", callback);
+
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(threads));
+
+ return ret;
+}
+
+} // namespace Test924Threads
+} // namespace art
diff --git a/test/926-multi-obsolescence/build b/test/926-multi-obsolescence/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/926-multi-obsolescence/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/926-multi-obsolescence/expected.txt b/test/926-multi-obsolescence/expected.txt
new file mode 100644
index 0000000000..0546490c44
--- /dev/null
+++ b/test/926-multi-obsolescence/expected.txt
@@ -0,0 +1,15 @@
+hello
+hello - 2
+Not doing anything here
+goodbye - 2
+goodbye
+hello
+hello - 2
+transforming calling functions
+goodbye - 2
+goodbye
+Hello - Transformed
+Hello 2 - Transformed
+Not doing anything here
+Goodbye 2 - Transformed
+Goodbye - Transformed
diff --git a/test/926-multi-obsolescence/info.txt b/test/926-multi-obsolescence/info.txt
new file mode 100644
index 0000000000..1399b966f1
--- /dev/null
+++ b/test/926-multi-obsolescence/info.txt
@@ -0,0 +1,2 @@
+Tests that we can redefine multiple classes at once using the RedefineClasses
+function.
diff --git a/test/926-multi-obsolescence/run b/test/926-multi-obsolescence/run
new file mode 100755
index 0000000000..4379349cb2
--- /dev/null
+++ b/test/926-multi-obsolescence/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --jvmti
diff --git a/test/923-monitors/monitors.h b/test/926-multi-obsolescence/src/CommonClassDefinition.java
index 14cd5cd633..62602a02e9 100644
--- a/test/923-monitors/monitors.h
+++ b/test/926-multi-obsolescence/src/CommonClassDefinition.java
@@ -14,17 +14,14 @@
* limitations under the License.
*/
-#ifndef ART_TEST_923_MONITORS_MONITORS_H_
-#define ART_TEST_923_MONITORS_MONITORS_H_
+class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
-#include <jni.h>
-
-namespace art {
-namespace Test923Monitors {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test923Monitors
-} // namespace art
-
-#endif // ART_TEST_923_MONITORS_MONITORS_H_
+ CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+}
diff --git a/test/926-multi-obsolescence/src/Main.java b/test/926-multi-obsolescence/src/Main.java
new file mode 100644
index 0000000000..8a6cf84b8b
--- /dev/null
+++ b/test/926-multi-obsolescence/src/Main.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.Base64;
+
+public class Main {
+ // class Transform {
+ // public void sayHi(Runnable r) {
+ // System.out.println("Hello - Transformed");
+ // r.run();
+ // System.out.println("Goodbye - Transformed");
+ // }
+ // }
+ private static CommonClassDefinition VALID_DEFINITION_T1 = new CommonClassDefinition(
+ Transform.class,
+ Base64.getDecoder().decode(
+ "yv66vgAAADQAJAoACAARCQASABMIABQKABUAFgsAFwAYCAAZBwAaBwAbAQAGPGluaXQ+AQADKClW" +
+ "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7" +
+ "KVYBAApTb3VyY2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMAAkACgcAHAwAHQAeAQATSGVsbG8gLSBU" +
+ "cmFuc2Zvcm1lZAcAHwwAIAAhBwAiDAAjAAoBABVHb29kYnllIC0gVHJhbnNmb3JtZWQBAAlUcmFu" +
+ "c2Zvcm0BABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZh" +
+ "L2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZh" +
+ "L2xhbmcvU3RyaW5nOylWAQASamF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAABwAIAAAAAAACAAAA" +
+ "CQAKAAEACwAAAB0AAQABAAAABSq3AAGxAAAAAQAMAAAABgABAAAAAQABAA0ADgABAAsAAAA7AAIA" +
+ "AgAAABeyAAISA7YABCu5AAUBALIAAhIGtgAEsQAAAAEADAAAABIABAAAAAMACAAEAA4ABQAWAAYA" +
+ "AQAPAAAAAgAQ"),
+ Base64.getDecoder().decode(
+ "ZGV4CjAzNQAYeAMMXgYWxoeSHAS9EWKCCtVRSAGpqZVQAwAAcAAAAHhWNBIAAAAAAAAAALACAAAR" +
+ "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAUAAAD8AAAAAQAAACQBAAAMAgAARAEAAKIB" +
+ "AACqAQAAwQEAANYBAADjAQAA+gEAAA4CAAAkAgAAOAIAAEwCAABcAgAAXwIAAGMCAAB3AgAAfAIA" +
+ "AIUCAACKAgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAoAAAAGAAAAAAAAAAsAAAAGAAAA" +
+ "lAEAAAsAAAAGAAAAnAEAAAUAAQANAAAAAAAAAAAAAAAAAAEAEAAAAAEAAgAOAAAAAgAAAAAAAAAD" +
+ "AAAADwAAAAAAAAAAAAAAAgAAAAAAAAAJAAAAAAAAAJ8CAAAAAAAAAQABAAEAAACRAgAABAAAAHAQ" +
+ "AwAAAA4ABAACAAIAAACWAgAAFAAAAGIAAAAbAQIAAABuIAIAEAByEAQAAwBiAAAAGwEBAAAAbiAC" +
+ "ABAADgABAAAAAwAAAAEAAAAEAAY8aW5pdD4AFUdvb2RieWUgLSBUcmFuc2Zvcm1lZAATSGVsbG8g" +
+ "LSBUcmFuc2Zvcm1lZAALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEv" +
+ "bGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABJM" +
+ "amF2YS9sYW5nL1N5c3RlbTsADlRyYW5zZm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00" +
+ "LjEzAANvdXQAB3ByaW50bG4AA3J1bgAFc2F5SGkAAQAHDgADAQAHDoc8hwAAAAEBAICABMQCAQHc" +
+ "AgAAAA0AAAAAAAAAAQAAAAAAAAABAAAAEQAAAHAAAAACAAAABwAAALQAAAADAAAAAwAAANAAAAAE" +
+ "AAAAAQAAAPQAAAAFAAAABQAAAPwAAAAGAAAAAQAAACQBAAABIAAAAgAAAEQBAAABEAAAAgAAAJQB" +
+ "AAACIAAAEQAAAKIBAAADIAAAAgAAAJECAAAAIAAAAQAAAJ8CAAAAEAAAAQAAALACAAA="));
+ // class Transform2 {
+ // public void sayHi(Runnable r) {
+ // System.out.println("Hello 2 - Transformed");
+ // r.run();
+ // System.out.println("Goodbye 2 - Transformed");
+ // }
+ // }
+ private static CommonClassDefinition VALID_DEFINITION_T2 = new CommonClassDefinition(
+ Transform2.class,
+ Base64.getDecoder().decode(
+ "yv66vgAAADQAJAoACAARCQASABMIABQKABUAFgsAFwAYCAAZBwAaBwAbAQAGPGluaXQ+AQADKClW" +
+ "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7" +
+ "KVYBAApTb3VyY2VGaWxlAQAPVHJhbnNmb3JtMi5qYXZhDAAJAAoHABwMAB0AHgEAFUhlbGxvIDIg" +
+ "LSBUcmFuc2Zvcm1lZAcAHwwAIAAhBwAiDAAjAAoBABdHb29kYnllIDIgLSBUcmFuc2Zvcm1lZAEA" +
+ "ClRyYW5zZm9ybTIBABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEA" +
+ "FUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAV" +
+ "KExqYXZhL2xhbmcvU3RyaW5nOylWAQASamF2YS9sYW5nL1J1bm5hYmxlAQADcnVuACAABwAIAAAA" +
+ "AAACAAAACQAKAAEACwAAAB0AAQABAAAABSq3AAGxAAAAAQAMAAAABgABAAAAAQABAA0ADgABAAsA" +
+ "AAA7AAIAAgAAABeyAAISA7YABCu5AAUBALIAAhIGtgAEsQAAAAEADAAAABIABAAAAAMACAAEAA4A" +
+ "BQAWAAYAAQAPAAAAAgAQ"),
+ Base64.getDecoder().decode(
+ "ZGV4CjAzNQCee5Z6+AuFcjnPjjn7QYgZmKSmFQCO4nxUAwAAcAAAAHhWNBIAAAAAAAAAALQCAAAR" +
+ "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAUAAAD8AAAAAQAAACQBAAAQAgAARAEAAKIB" +
+ "AACqAQAAwwEAANoBAADoAQAA/wEAABMCAAApAgAAPQIAAFECAABiAgAAZQIAAGkCAAB9AgAAggIA" +
+ "AIsCAACQAgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAoAAAAGAAAAAAAAAAsAAAAGAAAA" +
+ "lAEAAAsAAAAGAAAAnAEAAAUAAQANAAAAAAAAAAAAAAAAAAEAEAAAAAEAAgAOAAAAAgAAAAAAAAAD" +
+ "AAAADwAAAAAAAAAAAAAAAgAAAAAAAAAJAAAAAAAAAKUCAAAAAAAAAQABAAEAAACXAgAABAAAAHAQ" +
+ "AwAAAA4ABAACAAIAAACcAgAAFAAAAGIAAAAbAQIAAABuIAIAEAByEAQAAwBiAAAAGwEBAAAAbiAC" +
+ "ABAADgABAAAAAwAAAAEAAAAEAAY8aW5pdD4AF0dvb2RieWUgMiAtIFRyYW5zZm9ybWVkABVIZWxs" +
+ "byAyIC0gVHJhbnNmb3JtZWQADExUcmFuc2Zvcm0yOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJM" +
+ "amF2YS9sYW5nL09iamVjdDsAFExqYXZhL2xhbmcvUnVubmFibGU7ABJMamF2YS9sYW5nL1N0cmlu" +
+ "ZzsAEkxqYXZhL2xhbmcvU3lzdGVtOwAPVHJhbnNmb3JtMi5qYXZhAAFWAAJWTAASZW1pdHRlcjog" +
+ "amFjay00LjIwAANvdXQAB3ByaW50bG4AA3J1bgAFc2F5SGkAAQAHDgADAQAHDoc8hwAAAAEBAICA" +
+ "BMQCAQHcAgANAAAAAAAAAAEAAAAAAAAAAQAAABEAAABwAAAAAgAAAAcAAAC0AAAAAwAAAAMAAADQ" +
+ "AAAABAAAAAEAAAD0AAAABQAAAAUAAAD8AAAABgAAAAEAAAAkAQAAASAAAAIAAABEAQAAARAAAAIA" +
+ "AACUAQAAAiAAABEAAACiAQAAAyAAAAIAAACXAgAAACAAAAEAAAClAgAAABAAAAEAAAC0AgAA"));
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[1]);
+ doTest(new Transform(), new Transform2());
+ }
+
+ public static void doTest(final Transform t1, final Transform2 t2) {
+ t1.sayHi(() -> { t2.sayHi(() -> { System.out.println("Not doing anything here"); }); });
+ t1.sayHi(() -> {
+ t2.sayHi(() -> {
+ System.out.println("transforming calling functions");
+ doMultiClassRedefinition(VALID_DEFINITION_T1, VALID_DEFINITION_T2);
+ });
+ });
+ t1.sayHi(() -> { t2.sayHi(() -> { System.out.println("Not doing anything here"); }); });
+ }
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+}
diff --git a/test/905-object-free/tracking_free.h b/test/926-multi-obsolescence/src/Transform.java
index ba4aa43ffe..8cda6cdf53 100644
--- a/test/905-object-free/tracking_free.h
+++ b/test/926-multi-obsolescence/src/Transform.java
@@ -14,17 +14,17 @@
* limitations under the License.
*/
-#ifndef ART_TEST_905_OBJECT_FREE_TRACKING_FREE_H_
-#define ART_TEST_905_OBJECT_FREE_TRACKING_FREE_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test905ObjectFree {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test905ObjectFree
-} // namespace art
-
-#endif // ART_TEST_905_OBJECT_FREE_TRACKING_FREE_H_
+class Transform {
+ public void sayHi(Runnable r) {
+ // Use lower 'h' to make sure the string will have a different string id
+ // than the transformation (the transformation code is the same except
+  // the actual printed String, which was making the test inaccurately pass
+ // in JIT mode when loading the string from the dex cache, as the string ids
+ // of the two different strings were the same).
+ // We know the string ids will be different because lexicographically:
+ // "Hello" < "LTransform;" < "hello".
+ System.out.println("hello");
+ r.run();
+ System.out.println("goodbye");
+ }
+}
diff --git a/test/913-heaps/heaps.h b/test/926-multi-obsolescence/src/Transform2.java
index bd828aca33..4877f8455b 100644
--- a/test/913-heaps/heaps.h
+++ b/test/926-multi-obsolescence/src/Transform2.java
@@ -14,17 +14,10 @@
* limitations under the License.
*/
-#ifndef ART_TEST_913_HEAPS_HEAPS_H_
-#define ART_TEST_913_HEAPS_HEAPS_H_
-
-#include <jni.h>
-
-namespace art {
-namespace Test913Heaps {
-
-jint OnLoad(JavaVM* vm, char* options, void* reserved);
-
-} // namespace Test913Heaps
-} // namespace art
-
-#endif // ART_TEST_913_HEAPS_HEAPS_H_
+class Transform2 {
+ public void sayHi(Runnable r) {
+ System.out.println("hello - 2");
+ r.run();
+ System.out.println("goodbye - 2");
+ }
+}
diff --git a/test/953-invoke-polymorphic-compiler/build b/test/953-invoke-polymorphic-compiler/build
new file mode 100755
index 0000000000..a423ca6b4e
--- /dev/null
+++ b/test/953-invoke-polymorphic-compiler/build
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+if [[ $@ != *"--jvm"* ]]; then
+ # Don't do anything with jvm.
+ export USE_JACK=true
+fi
+
+./default-build "$@" --experimental method-handles
diff --git a/test/953-invoke-polymorphic-compiler/expected.txt b/test/953-invoke-polymorphic-compiler/expected.txt
new file mode 100644
index 0000000000..f47ee23fd8
--- /dev/null
+++ b/test/953-invoke-polymorphic-compiler/expected.txt
@@ -0,0 +1,25 @@
+Running Main.Min2Print2([33, -4])
+Running Main.Min2Print2([-4, 33])
+Running Main.Min2Print3([33, -4, 17])
+Running Main.Min2Print3([-4, 17, 33])
+Running Main.Min2Print3([17, 33, -4])
+Running Main.Min2Print6([33, -4, 77, 88, 99, 111])
+Running Main.Min2Print6([-4, 77, 88, 99, 111, 33])
+Running Main.Min2Print6([77, 88, 99, 111, 33, -4])
+Running Main.Min2Print6([88, 99, 111, 33, -4, 77])
+Running Main.Min2Print6([99, 111, 33, -4, 77, 88])
+Running Main.Min2Print6([111, 33, -4, 77, 88, 99])
+Running Main.Min2Print26([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25])
+Running Main.Min2Print26([25, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24])
+Running Main.Min2Print26([24, 25, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
+BasicTest done.
+$opt$ReturnBooleanTest done.
+$opt$ReturnCharTest done.
+$opt$ReturnByteTest done.
+$opt$ReturnShortTest done.
+$opt$ReturnIntTest done.
+$opt$ReturnLongTest done.
+$opt$ReturnFloatTest done.
+$opt$ReturnDoubleTest done.
+$opt$ReturnStringTest done.
+ReturnValuesTest done.
diff --git a/test/953-invoke-polymorphic-compiler/info.txt b/test/953-invoke-polymorphic-compiler/info.txt
new file mode 100644
index 0000000000..f1dbb61640
--- /dev/null
+++ b/test/953-invoke-polymorphic-compiler/info.txt
@@ -0,0 +1,3 @@
+Tests for method handle invocations.
+
+NOTE: needs to run under ART or a Java 8 Language runtime and compiler.
diff --git a/test/953-invoke-polymorphic-compiler/run b/test/953-invoke-polymorphic-compiler/run
new file mode 100755
index 0000000000..a9f182288c
--- /dev/null
+++ b/test/953-invoke-polymorphic-compiler/run
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+./default-run "$@" --experimental method-handles
diff --git a/test/953-invoke-polymorphic-compiler/src/Main.java b/test/953-invoke-polymorphic-compiler/src/Main.java
new file mode 100644
index 0000000000..20a8fec112
--- /dev/null
+++ b/test/953-invoke-polymorphic-compiler/src/Main.java
@@ -0,0 +1,374 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodHandles.Lookup;
+import java.lang.invoke.MethodType;
+import java.lang.invoke.WrongMethodTypeException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+public class Main {
+ public static void assertTrue(boolean value) {
+ if (!value) {
+ throw new AssertionError("assertTrue value: " + value);
+ }
+ }
+
+ public static void assertFalse(boolean value) {
+ if (value) {
+      throw new AssertionError("assertFalse value: " + value);
+ }
+ }
+
+ public static void assertEquals(int i1, int i2) {
+ if (i1 == i2) { return; }
+ throw new AssertionError("assertEquals i1: " + i1 + ", i2: " + i2);
+ }
+
+ public static void assertEquals(long i1, long i2) {
+ if (i1 == i2) { return; }
+ throw new AssertionError("assertEquals l1: " + i1 + ", l2: " + i2);
+ }
+
+ public static void assertEquals(Object o, Object p) {
+ if (o == p) { return; }
+ if (o != null && p != null && o.equals(p)) { return; }
+ throw new AssertionError("assertEquals: o1: " + o + ", o2: " + p);
+ }
+
+ public static void assertEquals(String s1, String s2) {
+ if (s1 == s2) {
+ return;
+ }
+
+ if (s1 != null && s2 != null && s1.equals(s2)) {
+ return;
+ }
+
+ throw new AssertionError("assertEquals s1: " + s1 + ", s2: " + s2);
+ }
+
+ public static void fail() {
+ System.err.println("fail");
+ Thread.dumpStack();
+ }
+
+ public static void fail(String message) {
+ System.err.println("fail: " + message);
+ Thread.dumpStack();
+ }
+
+ public static int Min2Print2(int a, int b) {
+ int[] values = new int[] { a, b };
+ System.err.println("Running Main.Min2Print2(" + Arrays.toString(values) + ")");
+ return a > b ? a : b;
+ }
+
+ public static int Min2Print3(int a, int b, int c) {
+ int[] values = new int[] { a, b, c };
+ System.err.println("Running Main.Min2Print3(" + Arrays.toString(values) + ")");
+ return a > b ? a : b;
+ }
+
+ public static int Min2Print6(int a, int b, int c, int d, int e, int f) {
+ int[] values = new int[] { a, b, c, d, e, f };
+ System.err.println("Running Main.Min2Print6(" + Arrays.toString(values) + ")");
+ return a > b ? a : b;
+ }
+
+ public static int Min2Print26(int a, int b, int c, int d,
+ int e, int f, int g, int h,
+ int i, int j, int k, int l,
+ int m, int n, int o, int p,
+ int q, int r, int s, int t,
+ int u, int v, int w, int x,
+ int y, int z) {
+ int[] values = new int[] { a, b, c, d, e, f, g, h, i, j, k, l, m,
+ n, o, p, q, r, s, t, u, v, w, x, y, z };
+ System.err.println("Running Main.Min2Print26(" + Arrays.toString(values) + ")");
+ return a > b ? a : b;
+ }
+
+ public static void $opt$BasicTest() throws Throwable {
+ MethodHandle mh;
+ mh = MethodHandles.lookup().findStatic(
+ Main.class, "Min2Print2", MethodType.methodType(int.class, int.class, int.class));
+ assertEquals((int) mh.invokeExact(33, -4), 33);
+ assertEquals((int) mh.invokeExact(-4, 33), 33);
+
+ mh = MethodHandles.lookup().findStatic(
+ Main.class, "Min2Print3",
+ MethodType.methodType(int.class, int.class, int.class, int.class));
+ assertEquals((int) mh.invokeExact(33, -4, 17), 33);
+ assertEquals((int) mh.invokeExact(-4, 17, 33), 17);
+ assertEquals((int) mh.invokeExact(17, 33, -4), 33);
+
+ mh = MethodHandles.lookup().findStatic(
+ Main.class, "Min2Print6",
+ MethodType.methodType(
+ int.class, int.class, int.class, int.class, int.class, int.class, int.class));
+ assertEquals((int) mh.invokeExact(33, -4, 77, 88, 99, 111), 33);
+ try {
+ // Too few arguments
+ assertEquals((int) mh.invokeExact(33, -4, 77, 88), 33);
+ fail("No WMTE for too few arguments");
+ } catch (WrongMethodTypeException e) {}
+ try {
+ // Too many arguments
+ assertEquals((int) mh.invokeExact(33, -4, 77, 88, 89, 90, 91), 33);
+ fail("No WMTE for too many arguments");
+ } catch (WrongMethodTypeException e) {}
+ assertEquals((int) mh.invokeExact(-4, 77, 88, 99, 111, 33), 77);
+ assertEquals((int) mh.invokeExact(77, 88, 99, 111, 33, -4), 88);
+ assertEquals((int) mh.invokeExact(88, 99, 111, 33, -4, 77), 99);
+ assertEquals((int) mh.invokeExact(99, 111, 33, -4, 77, 88), 111);
+ assertEquals((int) mh.invokeExact(111, 33, -4, 77, 88, 99), 111);
+
+ // A preposterous number of arguments.
+ mh = MethodHandles.lookup().findStatic(
+ Main.class, "Min2Print26",
+ MethodType.methodType(
+ // Return-type
+ int.class,
+ // Arguments
+ int.class, int.class, int.class, int.class, int.class, int.class, int.class, int.class,
+ int.class, int.class, int.class, int.class, int.class, int.class, int.class, int.class,
+ int.class, int.class, int.class, int.class, int.class, int.class, int.class, int.class,
+ int.class, int.class));
+ assertEquals(1, (int) mh.invokeExact(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25));
+ assertEquals(25, (int) mh.invokeExact(25, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24));
+ assertEquals(25, (int) mh.invokeExact(24, 25, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23));
+
+ try {
+ // Wrong argument type
+ mh.invokeExact("a");
+ fail("No WMTE for wrong arguments");
+ } catch (WrongMethodTypeException wmte) {}
+
+ try {
+ // Invoke on null handle.
+ MethodHandle mh0 = null;
+ mh0.invokeExact("bad");
+ fail("No NPE for you");
+ } catch (NullPointerException npe) {}
+
+ System.err.println("BasicTest done.");
+ }
+
+ private static boolean And(boolean lhs, boolean rhs) {
+ return lhs & rhs;
+ }
+
+ private static boolean Xor(boolean lhs, boolean rhs) {
+ return lhs ^ rhs;
+ }
+
+ private static String Multiply(String value, int n) {
+ String result = "";
+ for (int i = 0; i < n; ++i) {
+ result = value + result;
+ }
+ return result;
+ }
+
+ private static byte Multiply(byte value, byte n) {
+ return (byte)(value * n);
+ }
+
+ private static short Multiply(short value, short n) {
+ return (short)(value * n);
+ }
+
+ private static int Multiply(int value, int n) {
+ return value * n;
+ }
+
+ private static long Multiply(long value, long n) {
+ return value * n;
+ }
+
+ private static float Multiply(float value, float n) {
+ return value * n;
+ }
+
+ private static double Multiply(double value, double n) {
+ return value * n;
+ }
+
+ private static char Next(char c) {
+ return (char)(c + 1);
+ }
+
+ public static void $opt$ReturnBooleanTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh =
+ lookup.findStatic(Main.class, "And",
+ MethodType.methodType(boolean.class, boolean.class, boolean.class));
+ assertEquals(true, (boolean) mh.invokeExact(true, true));
+ assertEquals(false, (boolean) mh.invokeExact(true, false));
+ assertEquals(false, (boolean) mh.invokeExact(false, true));
+ assertEquals(false, (boolean) mh.invokeExact(false, false));
+ assertEquals(true, (boolean) mh.invoke(true, true));
+ assertEquals(false, (boolean) mh.invoke(true, false));
+ assertEquals(false, (boolean) mh.invoke(false, true));
+ assertEquals(false, (boolean) mh.invoke(false, false));
+
+ mh = lookup.findStatic(Main.class, "Xor",
+ MethodType.methodType(boolean.class, boolean.class, boolean.class));
+ assertEquals(false, (boolean) mh.invokeExact(true, true));
+ assertEquals(true, (boolean) mh.invokeExact(true, false));
+ assertEquals(true, (boolean) mh.invokeExact(false, true));
+ assertEquals(false, (boolean) mh.invokeExact(false, false));
+ assertEquals(false, (boolean) mh.invoke(true, true));
+ assertEquals(true, (boolean) mh.invoke(true, false));
+ assertEquals(true, (boolean) mh.invoke(false, true));
+ assertEquals(false, (boolean) mh.invoke(false, false));
+
+ System.err.println("$opt$ReturnBooleanTest done.");
+ }
+
+ public static void $opt$ReturnCharTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Next",
+ MethodType.methodType(char.class, char.class));
+ assertEquals('B', (char) mh.invokeExact('A'));
+ assertEquals((char) -55, (char) mh.invokeExact((char) -56));
+ System.err.println("$opt$ReturnCharTest done.");
+ }
+
+ public static void $opt$ReturnByteTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(byte.class, byte.class, byte.class));
+ assertEquals((byte) 30, (byte) mh.invokeExact((byte) 10, (byte) 3));
+ assertEquals((byte) -90, (byte) mh.invoke((byte) -10, (byte) 9));
+ System.err.println("$opt$ReturnByteTest done.");
+ }
+
+ public static void $opt$ReturnShortTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(short.class, short.class, short.class));
+ assertEquals((short) 3000, (short) mh.invokeExact((short) 1000, (short) 3));
+ assertEquals((short) -3000, (short) mh.invoke((short) -1000, (short) 3));
+ System.err.println("$opt$ReturnShortTest done.");
+ }
+
+ public static void $opt$ReturnIntTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(int.class, int.class, int.class));
+ assertEquals(3_000_000, (int) mh.invokeExact(1_000_000, 3));
+ assertEquals(-3_000_000, (int) mh.invoke(-1_000, 3_000));
+ System.err.println("$opt$ReturnIntTest done.");
+ }
+
+ public static void $opt$ReturnLongTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(long.class, long.class, long.class));
+ assertEquals(4_294_967_295_000L, (long) mh.invokeExact(1000L, 4_294_967_295L));
+ assertEquals(-4_294_967_295_000L, (long) mh.invoke(-1000L, 4_294_967_295L));
+ System.err.println("$opt$ReturnLongTest done.");
+ }
+
+ public static void $opt$ReturnFloatTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(float.class, float.class, float.class));
+ assertEquals(3.0F, (float) mh.invokeExact(1000.0F, 3e-3F));
+ assertEquals(-3.0F, (float) mh.invoke(-1000.0F, 3e-3F));
+ System.err.println("$opt$ReturnFloatTest done.");
+ }
+
+ public static void $opt$ReturnDoubleTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(double.class, double.class, double.class));
+ assertEquals(3033000.0, (double) mh.invokeExact(1000.0, 3.033e3));
+ assertEquals(-3033000.0, (double) mh.invoke(-1000.0, 3.033e3));
+ System.err.println("$opt$ReturnDoubleTest done.");
+ }
+
+ public static void $opt$ReturnStringTest() throws Throwable {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodHandle mh = lookup.findStatic(Main.class, "Multiply",
+ MethodType.methodType(String.class, String.class, int.class));
+ assertEquals("100010001000", (String) mh.invokeExact("1000", 3));
+ assertEquals("100010001000", (String) mh.invoke("1000", 3));
+ System.err.println("$opt$ReturnStringTest done.");
+ }
+
+ public static void ReturnValuesTest() throws Throwable {
+ $opt$ReturnBooleanTest();
+ $opt$ReturnCharTest();
+ $opt$ReturnByteTest();
+ $opt$ReturnShortTest();
+ $opt$ReturnIntTest();
+ $opt$ReturnLongTest();
+ $opt$ReturnFloatTest();
+ $opt$ReturnDoubleTest();
+ $opt$ReturnStringTest();
+ System.err.println("ReturnValuesTest done.");
+ }
+
+ static class ValueHolder {
+ public boolean m_z;
+ public static boolean s_z;
+ }
+
+ public static void $opt$AccessorsTest() throws Throwable {
+ ValueHolder valueHolder = new ValueHolder();
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+
+ MethodHandle setMember = lookup.findSetter(ValueHolder.class, "m_z", boolean.class);
+ MethodHandle getMember = lookup.findGetter(ValueHolder.class, "m_z", boolean.class);
+ MethodHandle setStatic = lookup.findStaticSetter(ValueHolder.class, "s_z", boolean.class);
+ MethodHandle getStatic = lookup.findStaticGetter(ValueHolder.class, "s_z", boolean.class);
+
+ boolean [] values = { false, true, false, true, false };
+ for (boolean value : values) {
+ assertEquals((boolean) getStatic.invoke(), ValueHolder.s_z);
+ setStatic.invoke(value);
+ ValueHolder.s_z = value;
+ assertEquals(ValueHolder.s_z, value);
+ assertEquals((boolean) getStatic.invoke(), value);
+
+ assertEquals((boolean) getMember.invoke(valueHolder), valueHolder.m_z);
+ setMember.invoke(valueHolder, value);
+ valueHolder.m_z = value;
+ assertEquals(valueHolder.m_z, value);
+ assertEquals((boolean) getMember.invoke(valueHolder), value);
+ }
+ }
+
+ public static void main(String[] args) throws Throwable {
+ $opt$BasicTest();
+ ReturnValuesTest();
+ $opt$AccessorsTest();
+ }
+}
diff --git a/test/957-methodhandle-transforms/src/Main.java b/test/957-methodhandle-transforms/src/Main.java
index 5806509fc3..9e79ff4c10 100644
--- a/test/957-methodhandle-transforms/src/Main.java
+++ b/test/957-methodhandle-transforms/src/Main.java
@@ -40,17 +40,17 @@ public class Main {
IllegalArgumentException.class);
if (handle.type().returnType() != String.class) {
- System.out.println("Unexpected return type for handle: " + handle +
+ fail("Unexpected return type for handle: " + handle +
" [ " + handle.type() + "]");
}
final IllegalArgumentException iae = new IllegalArgumentException("boo!");
try {
handle.invoke(iae);
- System.out.println("Expected an exception of type: java.lang.IllegalArgumentException");
+ fail("Expected an exception of type: java.lang.IllegalArgumentException");
} catch (IllegalArgumentException expected) {
if (expected != iae) {
- System.out.println("Wrong exception: expected " + iae + " but was " + expected);
+ fail("Wrong exception: expected " + iae + " but was " + expected);
}
}
}
@@ -262,7 +262,7 @@ public class Main {
array[0] = 42;
int value = (int) getter.invoke(array, 0);
if (value != 42) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
try {
@@ -284,7 +284,7 @@ public class Main {
array[0] = 42;
long value = (long) getter.invoke(array, 0);
if (value != 42l) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -294,7 +294,7 @@ public class Main {
array[0] = 42;
short value = (short) getter.invoke(array, 0);
if (value != 42l) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -304,7 +304,7 @@ public class Main {
array[0] = 42;
char value = (char) getter.invoke(array, 0);
if (value != 42l) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -314,7 +314,7 @@ public class Main {
array[0] = (byte) 0x8;
byte value = (byte) getter.invoke(array, 0);
if (value != (byte) 0x8) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -324,7 +324,7 @@ public class Main {
array[0] = true;
boolean value = (boolean) getter.invoke(array, 0);
if (!value) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -334,7 +334,7 @@ public class Main {
array[0] = 42.0f;
float value = (float) getter.invoke(array, 0);
if (value != 42.0f) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -344,7 +344,7 @@ public class Main {
array[0] = 42.0;
double value = (double) getter.invoke(array, 0);
if (value != 42.0) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -372,10 +372,10 @@ public class Main {
setter.invoke(array, 1, 43);
if (array[0] != 42) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
if (array[1] != 43) {
- System.out.println("Unexpected value: " + array[1]);
+ fail("Unexpected value: " + array[1]);
}
try {
@@ -396,7 +396,7 @@ public class Main {
long[] array = new long[1];
setter.invoke(array, 0, 42l);
if (array[0] != 42l) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -405,7 +405,7 @@ public class Main {
short[] array = new short[1];
setter.invoke(array, 0, (short) 42);
if (array[0] != 42l) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -414,7 +414,7 @@ public class Main {
char[] array = new char[1];
setter.invoke(array, 0, (char) 42);
if (array[0] != 42) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -423,7 +423,7 @@ public class Main {
byte[] array = new byte[1];
setter.invoke(array, 0, (byte) 0x8);
if (array[0] != (byte) 0x8) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -432,7 +432,7 @@ public class Main {
boolean[] array = new boolean[1];
setter.invoke(array, 0, true);
if (!array[0]) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -441,7 +441,7 @@ public class Main {
float[] array = new float[1];
setter.invoke(array, 0, 42.0f);
if (array[0] != 42.0f) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -450,7 +450,7 @@ public class Main {
double[] array = new double[1];
setter.invoke(array, 0, 42.0);
if (array[0] != 42.0) {
- System.out.println("Unexpected value: " + array[0]);
+ fail("Unexpected value: " + array[0]);
}
}
@@ -471,7 +471,7 @@ public class Main {
MethodHandle identity = MethodHandles.identity(boolean.class);
boolean value = (boolean) identity.invoke(false);
if (value) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -479,7 +479,7 @@ public class Main {
MethodHandle identity = MethodHandles.identity(byte.class);
byte value = (byte) identity.invoke((byte) 0x8);
if (value != (byte) 0x8) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -487,7 +487,7 @@ public class Main {
MethodHandle identity = MethodHandles.identity(char.class);
char value = (char) identity.invoke((char) -56);
if (value != (char) -56) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -495,7 +495,7 @@ public class Main {
MethodHandle identity = MethodHandles.identity(short.class);
short value = (short) identity.invoke((short) -59);
if (value != (short) -59) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + Short.toString(value));
}
}
@@ -503,7 +503,7 @@ public class Main {
MethodHandle identity = MethodHandles.identity(int.class);
int value = (int) identity.invoke(52);
if (value != 52) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -511,7 +511,7 @@ public class Main {
MethodHandle identity = MethodHandles.identity(long.class);
long value = (long) identity.invoke(-76l);
if (value != (long) -76) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -519,7 +519,7 @@ public class Main {
MethodHandle identity = MethodHandles.identity(float.class);
float value = (float) identity.invoke(56.0f);
if (value != (float) 56.0f) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -527,7 +527,7 @@ public class Main {
MethodHandle identity = MethodHandles.identity(double.class);
double value = (double) identity.invoke((double) 72.0);
if (value != (double) 72.0) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -544,28 +544,28 @@ public class Main {
MethodHandle constant = MethodHandles.constant(int.class, 56);
int value = (int) constant.invoke();
if (value != 56) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
// short constant values are converted to int.
constant = MethodHandles.constant(int.class, (short) 52);
value = (int) constant.invoke();
if (value != 52) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
// char constant values are converted to int.
constant = MethodHandles.constant(int.class, (char) 'b');
value = (int) constant.invoke();
if (value != (int) 'b') {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
// byte constant values are converted to int.
constant = MethodHandles.constant(int.class, (byte) 0x1);
value = (int) constant.invoke();
if (value != 1) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
// boolean, float, double and long primitive constants are not convertible
@@ -600,13 +600,13 @@ public class Main {
MethodHandle constant = MethodHandles.constant(long.class, 56l);
long value = (long) constant.invoke();
if (value != 56l) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
constant = MethodHandles.constant(long.class, (int) 56);
value = (long) constant.invoke();
if (value != 56l) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -615,7 +615,7 @@ public class Main {
MethodHandle constant = MethodHandles.constant(byte.class, (byte) 0x12);
byte value = (byte) constant.invoke();
if (value != (byte) 0x12) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -624,7 +624,7 @@ public class Main {
MethodHandle constant = MethodHandles.constant(boolean.class, true);
boolean value = (boolean) constant.invoke();
if (!value) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -633,7 +633,7 @@ public class Main {
MethodHandle constant = MethodHandles.constant(char.class, 'f');
char value = (char) constant.invoke();
if (value != 'f') {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -642,7 +642,7 @@ public class Main {
MethodHandle constant = MethodHandles.constant(short.class, (short) 123);
short value = (short) constant.invoke();
if (value != (short) 123) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -651,7 +651,7 @@ public class Main {
MethodHandle constant = MethodHandles.constant(float.class, 56.0f);
float value = (float) constant.invoke();
if (value != 56.0f) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -660,7 +660,7 @@ public class Main {
MethodHandle constant = MethodHandles.constant(double.class, 256.0);
double value = (double) constant.invoke();
if (value != 256.0) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -678,13 +678,13 @@ public class Main {
char value = (char) stringCharAt.invoke("foo", 0);
if (value != 'f') {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
MethodHandle bound = stringCharAt.bindTo("foo");
value = (char) bound.invoke(0);
if (value != 'f') {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
try {
@@ -706,7 +706,7 @@ public class Main {
bound = integerParseInt.bindTo("78452");
int intValue = (int) bound.invoke();
if (intValue != 78452) {
- System.out.println("Unexpected value: " + intValue);
+ fail("Unexpected value: " + intValue);
}
}
@@ -745,11 +745,11 @@ public class Main {
boolean value = (boolean) adapter.invoke((int) 42);
if (!value) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
value = (boolean) adapter.invoke((int) 43);
if (value) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -764,7 +764,7 @@ public class Main {
int value = (int) adapter.invoke("56");
if (value != 57) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
@@ -779,7 +779,7 @@ public class Main {
int value = (int) adapter.invoke();
if (value != 42) {
- System.out.println("Unexpected value: " + value);
+ fail("Unexpected value: " + value);
}
}
}
@@ -791,7 +791,7 @@ public class Main {
return;
}
- System.out.println("Unexpected arguments: " + a + ", " + b + ", " + c
+ fail("Unexpected arguments: " + a + ", " + b + ", " + c
+ ", " + d + ", " + e + ", " + f + ", " + g + ", " + h);
}
@@ -800,7 +800,7 @@ public class Main {
return;
}
- System.out.println("Unexpected arguments: " + a + ", " + b);
+ fail("Unexpected arguments: " + a + ", " + b);
}
public static void testPermuteArguments() throws Throwable {
@@ -893,6 +893,11 @@ public class Main {
Thread.dumpStack();
}
+ public static void fail(String message) {
+ System.out.println("fail: " + message);
+ Thread.dumpStack();
+ }
+
public static void assertEquals(String s1, String s2) {
if (s1 == s2) {
return;
diff --git a/test/Android.bp b/test/Android.bp
index a223c3aa29..b0f0e5a98d 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -264,6 +264,7 @@ art_cc_defaults {
"920-objects/objects.cc",
"922-properties/properties.cc",
"923-monitors/monitors.cc",
+ "924-threads/threads.cc",
],
shared_libs: [
"libbase",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index fd3a897dae..60688760a4 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -228,9 +228,15 @@ endef # name-to-var
# Disable 153-reference-stress temporarily until a fix arrives. b/33389022.
# Disable 080-oom-fragmentation due to flakes. b/33795328
+# Disable 497-inlining-and-class-loader and 542-unresolved-access-check until
+# they are rewritten. These tests use a broken class loader that tries to
+# register a dex file that's already registered with a different loader.
+# b/34193123
ART_TEST_RUN_TEST_SKIP += \
153-reference-stress \
- 080-oom-fragmentation
+ 080-oom-fragmentation \
+ 497-inlining-and-class-loader \
+ 542-unresolved-access-check
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
@@ -296,6 +302,8 @@ TEST_ART_BROKEN_TARGET_TESTS += \
921-hello-failure \
922-properties \
923-monitors \
+ 924-threads \
+ 926-multi-obsolescence \
ifneq (,$(filter target,$(TARGET_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -564,6 +572,7 @@ TEST_ART_BROKEN_JIT_RUN_TESTS := \
915-obsolete-2 \
917-fields-transformation \
919-obsolete-fields \
+ 926-multi-obsolescence \
ifneq (,$(filter jit,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -721,6 +730,16 @@ endif
TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS :=
+# Tests that check semantics for a non-debuggable app.
+TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS := \
+ 909-attach-agent \
+
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),debuggable,$(TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+
+TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS :=
+
# Tests incompatible with bisection bug search. Sorted by incompatibility reason.
# 000 through 595 do not compile anything. 089 tests a build failure. 018 through 137
# run dalvikvm more than once. 115 and 088 assume they are always compiled.
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 7451cf97de..1b6fc7033a 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -169,10 +169,9 @@ extern "C" JNIEXPORT void JNICALL Java_Main_ensureJitCompiled(JNIEnv* env,
}
jit::JitCodeCache* code_cache = jit->GetCodeCache();
- OatQuickMethodHeader* header = nullptr;
while (true) {
- header = OatQuickMethodHeader::FromEntryPoint(method->GetEntryPointFromQuickCompiledCode());
- if (code_cache->ContainsPc(header->GetCode())) {
+ const void* pc = method->GetEntryPointFromQuickCompiledCode();
+ if (code_cache->ContainsPc(pc)) {
break;
} else {
// Sleep to yield to the compiler thread.
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 8245947251..4794f6b7c1 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -322,6 +322,28 @@ if [ "$DEBUGGER" = "y" ]; then
DEBUGGER_OPTS="-agentlib:jdwp=transport=dt_socket,address=$PORT,server=y,suspend=y"
fi
+if [ "$IS_JVMTI_TEST" = "y" ]; then
+ plugin=libopenjdkjvmtid.so
+ agent=libtiagentd.so
+ lib=tiagentd
+ if [[ "$TEST_IS_NDEBUG" = "y" ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+ fi
+
+ ARGS="${ARGS} ${lib}"
+ if [[ "$USE_JVM" = "y" ]]; then
+ FLAGS="${FLAGS} -agentpath:${ANDROID_HOST_OUT}/nativetest64/${agent}=${TEST_NAME},jvm"
+ else
+ FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},art"
+ FLAGS="${FLAGS} -Xplugin:${plugin}"
+ FLAGS="${FLAGS} -Xfully-deoptable"
+ # Always make the compilation debuggable.
+ COMPILE_FLAGS="${COMPILE_FLAGS} --debuggable"
+ fi
+fi
+
if [ "$USE_JVM" = "y" ]; then
export LD_LIBRARY_PATH=${ANDROID_HOST_OUT}/lib64
# Xmx is necessary since we don't pass down the ART flags to JVM.
@@ -387,28 +409,6 @@ if [ "$JIT" = "y" ]; then
fi
fi
-if [ "$IS_JVMTI_TEST" = "y" ]; then
- plugin=libopenjdkjvmtid.so
- agent=libtiagentd.so
- lib=tiagentd
- if [[ "$TEST_IS_NDEBUG" = "y" ]]; then
- agent=libtiagent.so
- plugin=libopenjdkjvmti.so
- lib=tiagent
- fi
-
- ARGS="${ARGS} ${lib}"
- if [[ "$USE_JVM" = "y" ]]; then
- FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},jvm"
- else
- FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},art"
- FLAGS="${FLAGS} -Xplugin:${plugin}"
- FLAGS="${FLAGS} -Xfully-deoptable"
- # Always make the compilation be debuggable.
- COMPILE_FLAGS="${COMPILE_FLAGS} --debuggable"
- fi
-fi
-
JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
if [ "$RELOCATE" = "y" ]; then
diff --git a/test/run-test b/test/run-test
index ea9622aa69..abe73c3159 100755
--- a/test/run-test
+++ b/test/run-test
@@ -489,7 +489,7 @@ elif [ "$runtime" = "art" ]; then
fi
elif [ "$runtime" = "jvm" ]; then
# TODO: Detect whether the host is 32-bit or 64-bit.
- run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib64"
+ run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib64:${ANDROID_HOST_OUT}/nativetest64"
fi
if [ "$have_image" = "no" ]; then
diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc
index 6f98f10072..2c6d3eda00 100644
--- a/test/ti-agent/common_helper.cc
+++ b/test/ti-agent/common_helper.cc
@@ -62,48 +62,64 @@ bool JvmtiErrorToException(JNIEnv* env, jvmtiError error) {
namespace common_redefine {
-static void throwRedefinitionError(jvmtiEnv* jvmti, JNIEnv* env, jclass target, jvmtiError res) {
+static void throwRedefinitionError(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jint num_targets,
+ jclass* target,
+ jvmtiError res) {
std::stringstream err;
- char* signature = nullptr;
- char* generic = nullptr;
- jvmti->GetClassSignature(target, &signature, &generic);
char* error = nullptr;
jvmti->GetErrorName(res, &error);
- err << "Failed to redefine class <" << signature << "> due to " << error;
+ err << "Failed to redefine class";
+ if (num_targets > 1) {
+ err << "es";
+ }
+ err << " <";
+ for (jint i = 0; i < num_targets; i++) {
+ char* signature = nullptr;
+ char* generic = nullptr;
+ jvmti->GetClassSignature(target[i], &signature, &generic);
+ if (i != 0) {
+ err << ", ";
+ }
+ err << signature;
+ jvmti->Deallocate(reinterpret_cast<unsigned char*>(signature));
+ jvmti->Deallocate(reinterpret_cast<unsigned char*>(generic));
+ }
+ err << "> due to " << error;
std::string message = err.str();
- jvmti->Deallocate(reinterpret_cast<unsigned char*>(signature));
- jvmti->Deallocate(reinterpret_cast<unsigned char*>(generic));
jvmti->Deallocate(reinterpret_cast<unsigned char*>(error));
env->ThrowNew(env->FindClass("java/lang/Exception"), message.c_str());
}
-using RedefineDirectFunction = jvmtiError (*)(jvmtiEnv*, jclass, jint, const unsigned char*);
-static void DoClassTransformation(jvmtiEnv* jvmti_env,
- JNIEnv* env,
- jclass target,
- jbyteArray class_file_bytes,
- jbyteArray dex_file_bytes) {
- jbyteArray desired_array = IsJVM() ? class_file_bytes : dex_file_bytes;
- jint len = static_cast<jint>(env->GetArrayLength(desired_array));
- const unsigned char* redef_bytes = reinterpret_cast<const unsigned char*>(
- env->GetByteArrayElements(desired_array, nullptr));
- jvmtiError res;
- if (IsJVM()) {
- jvmtiClassDefinition def;
- def.klass = target;
- def.class_byte_count = static_cast<jint>(len);
- def.class_bytes = redef_bytes;
- res = jvmti_env->RedefineClasses(1, &def);
- } else {
- RedefineDirectFunction f =
- reinterpret_cast<RedefineDirectFunction>(jvmti_env->functions->reserved3);
- res = f(jvmti_env, target, len, redef_bytes);
+static void DoMultiClassRedefine(jvmtiEnv* jvmti_env,
+ JNIEnv* env,
+ jint num_redefines,
+ jclass* targets,
+ jbyteArray* class_file_bytes,
+ jbyteArray* dex_file_bytes) {
+ std::vector<jvmtiClassDefinition> defs;
+ for (jint i = 0; i < num_redefines; i++) {
+ jbyteArray desired_array = IsJVM() ? class_file_bytes[i] : dex_file_bytes[i];
+ jint len = static_cast<jint>(env->GetArrayLength(desired_array));
+ const unsigned char* redef_bytes = reinterpret_cast<const unsigned char*>(
+ env->GetByteArrayElements(desired_array, nullptr));
+ defs.push_back({targets[i], static_cast<jint>(len), redef_bytes});
}
+ jvmtiError res = jvmti_env->RedefineClasses(num_redefines, defs.data());
if (res != JVMTI_ERROR_NONE) {
- throwRedefinitionError(jvmti_env, env, target, res);
+ throwRedefinitionError(jvmti_env, env, num_redefines, targets, res);
}
}
+static void DoClassRedefine(jvmtiEnv* jvmti_env,
+ JNIEnv* env,
+ jclass target,
+ jbyteArray class_file_bytes,
+ jbyteArray dex_file_bytes) {
+ return DoMultiClassRedefine(jvmti_env, env, 1, &target, &class_file_bytes, &dex_file_bytes);
+}
+
// Magic JNI export that classes can use for redefining classes.
// To use classes should declare this as a native function with signature (Ljava/lang/Class;[B[B)V
extern "C" JNIEXPORT void JNICALL Java_Main_doCommonClassRedefinition(JNIEnv* env,
@@ -111,7 +127,38 @@ extern "C" JNIEXPORT void JNICALL Java_Main_doCommonClassRedefinition(JNIEnv* en
jclass target,
jbyteArray class_file_bytes,
jbyteArray dex_file_bytes) {
- DoClassTransformation(jvmti_env, env, target, class_file_bytes, dex_file_bytes);
+ DoClassRedefine(jvmti_env, env, target, class_file_bytes, dex_file_bytes);
+}
+
+// Magic JNI export that classes can use for redefining classes.
+// To use, classes should declare this as a native function with signature
+// ([Ljava/lang/Class;[[B[[B)V
+extern "C" JNIEXPORT void JNICALL Java_Main_doCommonMultiClassRedefinition(
+ JNIEnv* env,
+ jclass,
+ jobjectArray targets,
+ jobjectArray class_file_bytes,
+ jobjectArray dex_file_bytes) {
+ std::vector<jclass> classes;
+ std::vector<jbyteArray> class_files;
+ std::vector<jbyteArray> dex_files;
+ jint len = env->GetArrayLength(targets);
+ if (len != env->GetArrayLength(class_file_bytes) || len != env->GetArrayLength(dex_file_bytes)) {
+ env->ThrowNew(env->FindClass("java/lang/IllegalArgumentException"),
+ "the three array arguments passed to this function have different lengths!");
+ return;
+ }
+ for (jint i = 0; i < len; i++) {
+ classes.push_back(static_cast<jclass>(env->GetObjectArrayElement(targets, i)));
+ dex_files.push_back(static_cast<jbyteArray>(env->GetObjectArrayElement(dex_file_bytes, i)));
+ class_files.push_back(static_cast<jbyteArray>(env->GetObjectArrayElement(class_file_bytes, i)));
+ }
+ return DoMultiClassRedefine(jvmti_env,
+ env,
+ len,
+ classes.data(),
+ class_files.data(),
+ dex_files.data());
}
// Don't do anything
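For reference, a minimal sketch (not part of this change) of the Java side these exports expect: a test's Main class declares static native methods matching the signatures given in the comments above, (Ljava/lang/Class;[B[B)V for the single-class export and ([Ljava/lang/Class;[[B[[B)V for the new multi-class export. The Transform classes and the empty byte arrays below are illustrative placeholders; real tests embed pre-generated class-file and dex-file bytes.

public class Main {
  static class Transform1 { }
  static class Transform2 { }

  // Placeholder redefinition bytes; actual tests decode blobs produced
  // ahead of time from the transformed sources.
  static final byte[] CLASS_BYTES_1 = new byte[0];
  static final byte[] DEX_BYTES_1 = new byte[0];
  static final byte[] CLASS_BYTES_2 = new byte[0];
  static final byte[] DEX_BYTES_2 = new byte[0];

  // Matches Java_Main_doCommonClassRedefinition: (Ljava/lang/Class;[B[B)V
  public static native void doCommonClassRedefinition(Class<?> target,
                                                      byte[] classFileBytes,
                                                      byte[] dexFileBytes);

  // Matches Java_Main_doCommonMultiClassRedefinition: ([Ljava/lang/Class;[[B[[B)V
  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
                                                           byte[][] classFileBytes,
                                                           byte[][] dexFileBytes);

  public static void main(String[] args) {
    // One RedefineClasses call covering both classes; on failure the agent
    // throws a java.lang.Exception that names every class signature involved.
    doCommonMultiClassRedefinition(
        new Class<?>[] { Transform1.class, Transform2.class },
        new byte[][] { CLASS_BYTES_1, CLASS_BYTES_2 },
        new byte[][] { DEX_BYTES_1, DEX_BYTES_2 });
  }
}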
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index 33e132103e..521e672330 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -26,21 +26,7 @@
#include "common_helper.h"
#include "901-hello-ti-agent/basics.h"
-#include "903-hello-tagging/tagging.h"
-#include "904-object-allocation/tracking.h"
-#include "905-object-free/tracking_free.h"
-#include "906-iterate-heap/iterate_heap.h"
-#include "907-get-loaded-classes/get_loaded_classes.h"
-#include "908-gc-start-finish/gc_callbacks.h"
#include "909-attach-agent/attach.h"
-#include "910-methods/methods.h"
-#include "911-get-stack-trace/stack_trace.h"
-#include "912-classes/classes.h"
-#include "913-heaps/heaps.h"
-#include "918-fields/fields.h"
-#include "920-objects/objects.h"
-#include "922-properties/properties.h"
-#include "923-monitors/monitors.h"
namespace art {
@@ -55,31 +41,31 @@ struct AgentLib {
OnAttach attach;
};
-// A list of all the agents we have for testing.
+// A trivial OnLoad implementation that only initializes the global jvmti_env.
+static jint MinimalOnLoad(JavaVM* vm,
+ char* options ATTRIBUTE_UNUSED,
+ void* reserved ATTRIBUTE_UNUSED) {
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ SetAllCapabilities(jvmti_env);
+ return 0;
+}
+
+// A list of all the non-standard agents we have for testing. All other agents will use
+// MinimalOnLoad.
AgentLib agents[] = {
{ "901-hello-ti-agent", Test901HelloTi::OnLoad, nullptr },
{ "902-hello-transformation", common_redefine::OnLoad, nullptr },
- { "903-hello-tagging", Test903HelloTagging::OnLoad, nullptr },
- { "904-object-allocation", Test904ObjectAllocation::OnLoad, nullptr },
- { "905-object-free", Test905ObjectFree::OnLoad, nullptr },
- { "906-iterate-heap", Test906IterateHeap::OnLoad, nullptr },
- { "907-get-loaded-classes", Test907GetLoadedClasses::OnLoad, nullptr },
- { "908-gc-start-finish", Test908GcStartFinish::OnLoad, nullptr },
{ "909-attach-agent", nullptr, Test909AttachAgent::OnAttach },
- { "910-methods", Test910Methods::OnLoad, nullptr },
- { "911-get-stack-trace", Test911GetStackTrace::OnLoad, nullptr },
- { "912-classes", Test912Classes::OnLoad, nullptr },
- { "913-heaps", Test913Heaps::OnLoad, nullptr },
{ "914-hello-obsolescence", common_redefine::OnLoad, nullptr },
{ "915-obsolete-2", common_redefine::OnLoad, nullptr },
{ "916-obsolete-jit", common_redefine::OnLoad, nullptr },
{ "917-fields-transformation", common_redefine::OnLoad, nullptr },
- { "918-fields", Test918Fields::OnLoad, nullptr },
{ "919-obsolete-fields", common_redefine::OnLoad, nullptr },
- { "920-objects", Test920Objects::OnLoad, nullptr },
{ "921-hello-failure", common_redefine::OnLoad, nullptr },
- { "922-properties", Test922Properties::OnLoad, nullptr },
- { "923-monitors", Test923Monitors::OnLoad, nullptr },
+ { "926-multi-obsolescence", common_redefine::OnLoad, nullptr },
};
static AgentLib* FindAgent(char* name) {
@@ -120,18 +106,21 @@ extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void*
printf("Unable to find agent name in options: %s\n", options);
return -1;
}
+
+ SetIsJVM(remaining_options);
+
AgentLib* lib = FindAgent(name_option);
+ OnLoad fn = nullptr;
if (lib == nullptr) {
- printf("Unable to find agent named: %s, add it to the list in test/ti-agent/common_load.cc\n",
- name_option);
- return -2;
- }
- if (lib->load == nullptr) {
- printf("agent: %s does not include an OnLoad method.\n", name_option);
- return -3;
+ fn = &MinimalOnLoad;
+ } else {
+ if (lib->load == nullptr) {
+ printf("agent: %s does not include an OnLoad method.\n", name_option);
+ return -3;
+ }
+ fn = lib->load;
}
- SetIsJVM(remaining_options);
- return lib->load(vm, remaining_options, reserved);
+ return fn(vm, remaining_options, reserved);
}
extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM* vm, char* options, void* reserved) {