Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc | 19
-rw-r--r--  compiler/optimizing/bounds_check_elimination_test.cc | 8
-rw-r--r--  compiler/optimizing/builder.h | 6
-rw-r--r--  compiler/optimizing/code_generator.cc | 88
-rw-r--r--  compiler/optimizing/code_generator.h | 28
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 210
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 14
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 214
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 19
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 199
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h | 13
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 185
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 8
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 168
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 8
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 202
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 17
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 188
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 17
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_arm.cc | 60
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_mips.cc | 20
-rw-r--r--  compiler/optimizing/graph_visualizer.cc | 5
-rw-r--r--  compiler/optimizing/gvn_test.cc | 38
-rw-r--r--  compiler/optimizing/induction_var_range.cc | 17
-rw-r--r--  compiler/optimizing/induction_var_range.h | 4
-rw-r--r--  compiler/optimizing/induction_var_range_test.cc | 9
-rw-r--r--  compiler/optimizing/inliner.cc | 67
-rw-r--r--  compiler/optimizing/inliner.h | 2
-rw-r--r--  compiler/optimizing/instruction_builder.cc | 242
-rw-r--r--  compiler/optimizing/instruction_builder.h | 24
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 63
-rw-r--r--  compiler/optimizing/intrinsics.cc | 2
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc | 53
-rw-r--r--  compiler/optimizing/licm_test.cc | 11
-rw-r--r--  compiler/optimizing/load_store_elimination.cc | 6
-rw-r--r--  compiler/optimizing/loop_optimization.cc | 166
-rw-r--r--  compiler/optimizing/loop_optimization.h | 8
-rw-r--r--  compiler/optimizing/nodes.cc | 66
-rw-r--r--  compiler/optimizing/nodes.h | 394
-rw-r--r--  compiler/optimizing/nodes_test.cc | 8
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 2
-rw-r--r--  compiler/optimizing/pc_relative_fixups_mips.cc | 3
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.cc | 2
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc | 47
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.h | 1
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc | 304
-rw-r--r--  compiler/optimizing/register_allocator_test.cc | 10
-rw-r--r--  compiler/optimizing/sharpening.cc | 123
-rw-r--r--  compiler/optimizing/sharpening.h | 2
-rw-r--r--  compiler/optimizing/stack_map_stream.cc | 86
-rw-r--r--  compiler/optimizing/stack_map_stream.h | 21
-rw-r--r--  compiler/optimizing/stack_map_test.cc | 103
52 files changed, 1937 insertions, 1643 deletions
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 7dc094b25f..2ee4db923a 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -153,21 +153,6 @@ class ValueBound : public ValueObject {
return instruction_ == bound.instruction_ && constant_ == bound.constant_;
}
- /*
- * Hunt "under the hood" of array lengths (leading to array references),
- * null checks (also leading to array references), and new arrays
- * (leading to the actual length). This makes it more likely related
- * instructions become actually comparable.
- */
- static HInstruction* HuntForDeclaration(HInstruction* instruction) {
- while (instruction->IsArrayLength() ||
- instruction->IsNullCheck() ||
- instruction->IsNewArray()) {
- instruction = instruction->InputAt(0);
- }
- return instruction;
- }
-
static bool Equal(HInstruction* instruction1, HInstruction* instruction2) {
if (instruction1 == instruction2) {
return true;
@@ -1136,7 +1121,7 @@ class BCEVisitor : public HGraphVisitor {
}
void VisitNewArray(HNewArray* new_array) OVERRIDE {
- HInstruction* len = new_array->InputAt(0);
+ HInstruction* len = new_array->GetLength();
if (!len->IsIntConstant()) {
HInstruction *left;
int32_t right_const;
@@ -1324,7 +1309,7 @@ class BCEVisitor : public HGraphVisitor {
InductionVarRange::Value v2;
bool needs_finite_test = false;
HInstruction* index = context->InputAt(0);
- HInstruction* hint = ValueBound::HuntForDeclaration(context->InputAt(1));
+ HInstruction* hint = HuntForDeclaration(context->InputAt(1));
if (induction_range_.GetInductionRange(context, index, hint, &v1, &v2, &needs_finite_test)) {
if (v1.is_known && (v1.a_constant == 0 || v1.a_constant == 1) &&
v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) {
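
Note: the ValueBound::HuntForDeclaration() helper removed above is still called unqualified in the later hunk, so it has evidently been hoisted into a shared header (presumably nodes.h, whose diff is not shown here in full). A minimal sketch of the relocated free function, reconstructed from the removed lines and adjusted for the new HNewArray::GetLength() accessor; this is an assumption about the relocated code, not a verbatim quote:

// Hunt "under the hood" of array lengths (leading to array references),
// null checks (also leading to array references), and new arrays
// (leading to the actual length), so that related instructions are
// more likely to become comparable.
inline HInstruction* HuntForDeclaration(HInstruction* instruction) {
  while (instruction->IsArrayLength() ||
         instruction->IsNullCheck() ||
         instruction->IsNewArray()) {
    instruction = instruction->IsNewArray()
        ? instruction->AsNewArray()->GetLength()  // Length is no longer input 0.
        : instruction->InputAt(0);
  }
  return instruction;
}
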
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index dfa150406d..5d58207511 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -596,13 +596,11 @@ static HInstruction* BuildSSAGraph3(HGraph* graph,
HBasicBlock* block = new (allocator) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
+ // We pass a bogus constant for the class to avoid mocking one.
HInstruction* new_array = new (allocator) HNewArray(
constant_10,
- graph->GetCurrentMethod(),
- 0,
- dex::TypeIndex(static_cast<uint16_t>(Primitive::kPrimInt)),
- graph->GetDexFile(),
- kQuickAllocArray);
+ constant_10,
+ 0);
block->AddInstruction(new_array);
block->AddInstruction(new (allocator) HGoto());
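
Note: the rewritten test call implies HNewArray's new shape. The entrypoint, current method, dex file and type index arguments are gone, leaving a class input, a length input and a dex pc, which is also why the BCE visitor above now calls GetLength() instead of InputAt(0). A hedged sketch of the node as implied by these two hunks (the authoritative declaration is in the nodes.h part of this change):

class HNewArray FINAL : public HExpression<2> {
 public:
  HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc)
      : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC(), dex_pc) {
    SetRawInputAt(0, cls);     // The test passes a bogus constant here.
    SetRawInputAt(1, length);  // The length moved from input 0 to input 1.
  }

  HInstruction* GetLength() const { return InputAt(1); }
};
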
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index f896f1199e..8cf4089eba 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -63,7 +63,8 @@ class HGraphBuilder : public ValueObject {
driver,
interpreter_metadata,
compiler_stats,
- dex_cache) {}
+ dex_cache,
+ handles) {}
// Only for unit testing.
HGraphBuilder(HGraph* graph,
@@ -90,7 +91,8 @@ class HGraphBuilder : public ValueObject {
/* compiler_driver */ nullptr,
/* interpreter_metadata */ nullptr,
/* compiler_stats */ nullptr,
- null_dex_cache_) {}
+ null_dex_cache_,
+ handles) {}
GraphAnalysisResult BuildGraph();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 402eeee65f..99427f05da 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -367,6 +367,12 @@ void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invok
InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}
+void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) {
+ MoveConstant(invoke->GetLocations()->GetTemp(0), static_cast<int32_t>(invoke->GetType()));
+ QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
+ InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
+}
+
void CodeGenerator::CreateUnresolvedFieldLocationSummary(
HInstruction* field_access,
Primitive::Type field_type,
@@ -491,31 +497,34 @@ void CodeGenerator::GenerateUnresolvedFieldAccess(
}
}
-// TODO: Remove argument `code_generator_supports_read_barrier` when
-// all code generators have read barrier support.
-void CodeGenerator::CreateLoadClassLocationSummary(HLoadClass* cls,
- Location runtime_type_index_location,
- Location runtime_return_location,
- bool code_generator_supports_read_barrier) {
- ArenaAllocator* allocator = cls->GetBlock()->GetGraph()->GetArena();
- LocationSummary::CallKind call_kind = cls->NeedsAccessCheck()
- ? LocationSummary::kCallOnMainOnly
- : (((code_generator_supports_read_barrier && kEmitCompilerReadBarrier) ||
- cls->CanCallRuntime())
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall);
- LocationSummary* locations = new (allocator) LocationSummary(cls, call_kind);
+void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
+ Location runtime_type_index_location,
+ Location runtime_return_location) {
+ DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod);
+ DCHECK_EQ(cls->InputCount(), 1u);
+ LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetArena()) LocationSummary(
+ cls, LocationSummary::kCallOnMainOnly);
+ locations->SetInAt(0, Location::NoLocation());
+ locations->AddTemp(runtime_type_index_location);
+ locations->SetOut(runtime_return_location);
+}
+
+void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
+ DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod);
+ LocationSummary* locations = cls->GetLocations();
+ MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
if (cls->NeedsAccessCheck()) {
- locations->SetInAt(0, Location::NoLocation());
- locations->AddTemp(runtime_type_index_location);
- locations->SetOut(runtime_return_location);
+ CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+ InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
+ } else if (cls->MustGenerateClinitCheck()) {
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ InvokeRuntime(kQuickInitializeStaticStorage, cls, cls->GetDexPc());
} else {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ InvokeRuntime(kQuickInitializeType, cls, cls->GetDexPc());
}
}
-
void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
// The DCHECKS below check that a register is not specified twice in
// the summary. The out location can overlap with an input, so we need
@@ -830,8 +839,8 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
// last emitted is different than the native pc of the stack map just emitted.
size_t number_of_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
if (number_of_stack_maps > 1) {
- DCHECK_NE(stack_map_stream_.GetStackMap(number_of_stack_maps - 1).native_pc_offset,
- stack_map_stream_.GetStackMap(number_of_stack_maps - 2).native_pc_offset);
+ DCHECK_NE(stack_map_stream_.GetStackMap(number_of_stack_maps - 1).native_pc_code_offset,
+ stack_map_stream_.GetStackMap(number_of_stack_maps - 2).native_pc_code_offset);
}
}
}
@@ -839,7 +848,8 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
bool CodeGenerator::HasStackMapAtCurrentPc() {
uint32_t pc = GetAssembler()->CodeSize();
size_t count = stack_map_stream_.GetNumberOfStackMaps();
- return count > 0 && stack_map_stream_.GetStackMap(count - 1).native_pc_offset == pc;
+ CodeOffset native_pc_offset = stack_map_stream_.GetStackMap(count - 1).native_pc_code_offset;
+ return (count > 0) && (native_pc_offset.Uint32Value(GetInstructionSet()) == pc);
}
void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
@@ -927,10 +937,10 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
if (environment->GetParent() != nullptr) {
// We emit the parent environment first.
EmitEnvironment(environment->GetParent(), slow_path);
- stack_map_stream_.BeginInlineInfoEntry(environment->GetMethodIdx(),
+ stack_map_stream_.BeginInlineInfoEntry(environment->GetMethod(),
environment->GetDexPc(),
- environment->GetInvokeType(),
- environment->Size());
+ environment->Size(),
+ &graph_->GetDexFile());
}
// Walk over the environment, and record the location of dex registers.
@@ -1378,28 +1388,21 @@ uint32_t CodeGenerator::GetReferenceDisableFlagOffset() const {
void CodeGenerator::EmitJitRoots(uint8_t* code,
Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data,
- Handle<mirror::DexCache> outer_dex_cache) {
+ const uint8_t* roots_data) {
DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
- StackHandleScope<1> hs(Thread::Current());
- MutableHandle<mirror::DexCache> h_dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
size_t index = 0;
for (auto& entry : jit_string_roots_) {
- const DexFile& entry_dex_file = *entry.first.dex_file;
- // Avoid the expensive FindDexCache call by checking if the string is
- // in the compiled method's dex file.
- h_dex_cache.Assign(IsSameDexFile(*outer_dex_cache->GetDexFile(), entry_dex_file)
- ? outer_dex_cache.Get()
- : class_linker->FindDexCache(hs.Self(), entry_dex_file));
- mirror::String* string = class_linker->LookupString(
- entry_dex_file, entry.first.string_index, h_dex_cache);
- DCHECK(string != nullptr) << "JIT roots require strings to have been loaded";
+ // Update the `roots` with the string, and replace the address temporarily
+ // stored to the index in the table.
+ uint64_t address = entry.second;
+ roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
+ DCHECK(roots->Get(index) != nullptr);
+ entry.second = index;
// Ensure the string is strongly interned. This is a requirement on how the JIT
// handles strings. b/32995596
- class_linker->GetInternTable()->InternStrong(string);
- roots->Set(index, string);
- entry.second = index;
+ class_linker->GetInternTable()->InternStrong(
+ reinterpret_cast<mirror::String*>(roots->Get(index)));
++index;
}
for (auto& entry : jit_class_roots_) {
@@ -1407,6 +1410,7 @@ void CodeGenerator::EmitJitRoots(uint8_t* code,
// stored to the index in the table.
uint64_t address = entry.second;
roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
+ DCHECK(roots->Get(index) != nullptr);
entry.second = index;
++index;
}
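
Note: the string-root bookkeeping now matches the class-root loop below it. When a JIT literal is created, the code generator records the raw address of the handle's StackReference (see the DeduplicateJitStringLiteral/DeduplicateJitClassLiteral changes in the per-architecture files), and EmitJitRoots later decodes the object through that slot before overwriting entry.second with the table index. The round trip, condensed from the hunks in this change:

// At literal-creation time: stash the GC-visible slot of the handle,
// not the (movable) object pointer itself.
jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
                            reinterpret_cast64<uint64_t>(handle.GetReference()));

// At emission time: load the object back through the slot, publish it
// in the root table, then reuse the map value as the table index.
uint64_t address = entry.second;
roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
entry.second = index;
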
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2e2c3c00af..2d129aff22 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -351,8 +351,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// Also emits literal patches.
void EmitJitRoots(uint8_t* code,
Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data,
- Handle<mirror::DexCache> outer_dex_cache)
+ const uint8_t* roots_data)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsLeafMethod() const {
@@ -427,12 +426,12 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
}
- // Perfoms checks pertaining to an InvokeRuntime call.
+ // Performs checks pertaining to an InvokeRuntime call.
void ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
SlowPathCode* slow_path);
- // Perfoms checks pertaining to an InvokeRuntimeWithoutRecordingPcInfo call.
+ // Performs checks pertaining to an InvokeRuntimeWithoutRecordingPcInfo call.
static void ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
SlowPathCode* slow_path);
@@ -496,6 +495,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);
+ void GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke);
+
void CreateUnresolvedFieldLocationSummary(
HInstruction* field_access,
Primitive::Type field_type,
@@ -508,11 +509,10 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
uint32_t dex_pc,
const FieldAccessCallingConvention& calling_convention);
- // TODO: This overlaps a bit with MoveFromReturnRegister. Refactor for a better design.
- static void CreateLoadClassLocationSummary(HLoadClass* cls,
- Location runtime_type_index_location,
- Location runtime_return_location,
- bool code_generator_supports_read_barrier = false);
+ static void CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
+ Location runtime_type_index_location,
+ Location runtime_return_location);
+ void GenerateLoadClassRuntimeCall(HLoadClass* cls);
static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke);
@@ -522,7 +522,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
virtual void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path) = 0;
+ SlowPathCode* slow_path = nullptr) = 0;
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
@@ -608,7 +608,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
number_of_register_pairs_(number_of_register_pairs),
core_callee_save_mask_(core_callee_save_mask),
fpu_callee_save_mask_(fpu_callee_save_mask),
- stack_map_stream_(graph->GetArena()),
+ stack_map_stream_(graph->GetArena(), graph->GetInstructionSet()),
block_order_(nullptr),
jit_string_roots_(StringReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -713,9 +713,9 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
const ArenaVector<HBasicBlock*>* block_order_;
// Maps a StringReference (dex_file, string_index) to the index in the literal table.
- // Entries are intially added with a 0 index, and `EmitJitRoots` will compute all the
- // indices.
- ArenaSafeMap<StringReference, uint32_t, StringReferenceValueComparator> jit_string_roots_;
+ // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
+ // will compute all the indices.
+ ArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
// Maps a ClassReference (dex_file, type_index) to the index in the literal table.
// Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
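
Note: defaulting slow_path to nullptr above is what lets the many call sites touched by this change drop the trailing argument; for example:

// Before: the absent slow path was spelled out at every main-call site.
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc(), nullptr);
// After: the default covers the common kCallOnMainOnly case.
codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
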
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e469b1d279..2a30178ef6 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -371,22 +371,23 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCodeARM(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeARM(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- arm_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ arm_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -400,6 +401,23 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the .bss entry address in the fast path,
+ // so that we can avoid another calculation here.
+ CodeGeneratorARM::PcRelativePatchInfo* labels =
+ arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ __ BindTrackedLabel(&labels->movw_label);
+ __ movw(IP, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->movt_label);
+ __ movt(IP, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->add_pc_label);
+ __ add(IP, IP, ShifterOperand(PC));
+ __ str(locations->Out().AsRegister<Register>(), Address(IP));
+ }
__ b(GetExitLabel());
}
@@ -409,10 +427,6 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -430,7 +444,7 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = load->GetStringIndex().index_;
+ const dex::StringIndex string_index = load->GetStringIndex();
Register out = locations->Out().AsRegister<Register>();
Register temp = locations->GetTemp(0).AsRegister<Register>();
constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
@@ -449,7 +463,7 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
__ mov(entry_address, ShifterOperand(temp));
}
- __ LoadImmediate(calling_convention.GetRegisterAt(0), string_index);
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), string_index.index_);
arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -1208,6 +1222,7 @@ CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
@@ -1224,7 +1239,8 @@ void CodeGeneratorARM::Finalize(CodeAllocator* allocator) {
// Adjust native pc offsets in stack maps.
for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
- uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
+ uint32_t old_position =
+ stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kThumb2);
uint32_t new_position = __ GetAdjustedPosition(old_position);
stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
}
@@ -2370,6 +2386,14 @@ void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke)
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderARM::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
void LocationsBuilderARM::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -3936,7 +3960,6 @@ void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
} else {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(Location::RegisterLocation(R0));
}
@@ -3954,7 +3977,7 @@ void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
@@ -3962,19 +3985,16 @@ void LocationsBuilderARM::VisitNewArray(HNewArray* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(R0));
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
- InvokeRuntimeCallingConvention calling_convention;
- __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex().index_);
// Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
+ codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
}
void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
@@ -5709,17 +5729,11 @@ HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kJitTableAddress:
- break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
- // We disable pc-relative load when there is an irreducible loop, as the optimization
- // is incompatible with it.
- // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- if (GetGraph()->HasIrreducibleLoops()) {
- return HLoadClass::LoadKind::kDexCacheViaMethod;
- }
+ break;
+ case HLoadClass::LoadKind::kJitTableAddress:
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
@@ -5728,15 +5742,16 @@ HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
}
void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(R0),
- /* code_generator_supports_read_barrier */ true);
+ Location::RegisterLocation(R0));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -5747,24 +5762,23 @@ void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod ||
- load_kind == HLoadClass::LoadKind::kDexCachePcRelative) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
@@ -5772,7 +5786,7 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -5786,12 +5800,14 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ LoadLiteral(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
CodeGeneratorARM::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
@@ -5805,41 +5821,36 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ CodeGeneratorARM::PcRelativePatchInfo* labels =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ __ BindTrackedLabel(&labels->movw_label);
+ __ movw(out, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->movt_label);
+ __ movt(out, /* placeholder */ 0u);
+ __ BindTrackedLabel(&labels->add_pc_label);
+ __ add(out, out, ShifterOperand(PC));
+ GenerateGcRootFieldLoad(cls, out_loc, out, 0, kCompilerReadBarrierOption);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
__ LoadLiteral(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
- cls->GetAddress()));
+ cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- Register base_reg = locations->InAt(0).AsRegister<Register>();
- HArmDexCacheArraysBase* base = cls->InputAt(0)->AsArmDexCacheArraysBase();
- int32_t offset = cls->GetDexCacheElementOffset() - base->GetElementOffset();
- // /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- Register current_method = locations->InAt(0).AsRegister<Register>();
- __ LoadFromOffset(kLoadWord,
- out,
- current_method,
- ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- }
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -5937,7 +5948,9 @@ void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
}
}
-void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
@@ -5945,6 +5958,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out, codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
load->GetStringIndex()));
return; // No dex cache slow path.
@@ -5952,7 +5966,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorARM::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
__ BindTrackedLabel(&labels->movw_label);
__ movw(out, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->movt_label);
@@ -5962,8 +5976,9 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
@@ -5971,7 +5986,7 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
Register temp = locations->GetTemp(0).AsRegister<Register>();
CodeGeneratorARM::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
__ BindTrackedLabel(&labels->movw_label);
__ movw(temp, /* placeholder */ 0u);
__ BindTrackedLabel(&labels->movt_label);
@@ -5987,7 +6002,8 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
}
case HLoadString::LoadKind::kJitTableAddress: {
__ LoadLiteral(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
+ load->GetStringIndex(),
+ load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
return;
@@ -7135,18 +7151,7 @@ void CodeGeneratorARM::GenerateReadBarrierForRootSlow(HInstruction* instruction,
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
- HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
- // We disable pc-relative load when there is an irreducible loop, as the optimization
- // is incompatible with it.
- // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- if (GetGraph()->HasIrreducibleLoops() &&
- (dispatch_info.method_load_kind ==
- HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative)) {
- dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
- }
-
- return dispatch_info;
+ return desired_dispatch_info;
}
Register CodeGeneratorARM::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
@@ -7281,8 +7286,8 @@ void CodeGeneratorARM::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp
}
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeStringPatch(
- const DexFile& dex_file, uint32_t string_index) {
- return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeTypePatch(
@@ -7290,6 +7295,11 @@ CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeTypePatch(
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
}
+CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewTypeBssEntryPatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
+}
+
CodeGeneratorARM::PcRelativePatchInfo* CodeGeneratorARM::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -7322,8 +7332,10 @@ Literal* CodeGeneratorARM::DeduplicateBootImageAddressLiteral(uint32_t address)
}
Literal* CodeGeneratorARM::DeduplicateJitStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -7331,8 +7343,9 @@ Literal* CodeGeneratorARM::DeduplicateJitStringLiteral(const DexFile& dex_file,
Literal* CodeGeneratorARM::DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
@@ -7366,6 +7379,7 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
/* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
boot_image_address_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
@@ -7380,12 +7394,17 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
target_string.string_index.index_));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
Literal* literal = entry.second;
@@ -7395,8 +7414,6 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
target_type.dex_file,
target_type.type_index.index_));
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
- linker_patches);
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
Literal* literal = entry.second;
@@ -7404,6 +7421,7 @@ void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
uint32_t literal_offset = literal->GetLabel()->Position();
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
+ DCHECK_EQ(size, linker_patches->size());
}
Literal* CodeGeneratorARM::DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map) {
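
Note: read together, the new ARM kBssEntry handling behaves like a lazily-filled cache slot. Sketched below as commented pseudocode (a simplification for orientation, not ART API):

// Fast path (VisitLoadClass, kBssEntry case):
//   slot = &bss_entry(type_index)    // movw/movt/add PC, patched at link time
//   out  = GcRootLoad(slot)          // may emit a read barrier
//   if (out == null) goto slow_path  // generate_null_check = true
// Slow path (LoadClassSlowPathARM):
//   out = kQuickInitializeType(type_index)  // or ...StaticStorage for clinit
//   *slot = out                      // the added movw/movt/add/str sequence
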
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 0376f0394c..df2dbc74ab 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -482,18 +482,22 @@ class CodeGeneratorARM : public CodeGenerator {
Label add_pc_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
dex::StringIndex string_index);
Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
- Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, dex::StringIndex string_index);
+ Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle);
Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address);
+ Handle<mirror::Class> handle);
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
@@ -634,8 +638,10 @@ class CodeGeneratorARM : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
TypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0fdfeaf4f4..d4dcbc0501 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -276,22 +276,23 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCodeARM64(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeARM64(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ Mov(calling_convention.GetRegisterAt(0).W(), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- arm64_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ arm64_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -302,11 +303,32 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
- Primitive::Type type = at_->GetType();
+ Primitive::Type type = instruction_->GetType();
arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
-
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ UseScratchRegisterScope temps(arm64_codegen->GetVIXLAssembler());
+ Register temp = temps.AcquireX();
+ const DexFile& dex_file = cls_->GetDexFile();
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the ADRP in the fast path, so that we
+ // can avoid the ADRP here.
+ vixl::aarch64::Label* adrp_label =
+ arm64_codegen->NewBssEntryTypePatch(dex_file, type_index);
+ arm64_codegen->EmitAdrpPlaceholder(adrp_label, temp);
+ vixl::aarch64::Label* strp_label =
+ arm64_codegen->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
+ {
+ SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
+ __ Bind(strp_label);
+ __ str(RegisterFrom(locations->Out(), Primitive::kPrimNot),
+ MemOperand(temp, /* offset placeholder */ 0));
+ }
+ }
__ B(GetExitLabel());
}
@@ -316,10 +338,6 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -349,8 +367,8 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
- __ Mov(calling_convention.GetRegisterAt(0).W(), string_index);
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
+ __ Mov(calling_convention.GetRegisterAt(0).W(), string_index.index_);
arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
Primitive::Type type = instruction_->GetType();
@@ -1154,6 +1172,7 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
@@ -3995,7 +4014,7 @@ Location CodeGeneratorARM64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStati
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
// Add ADRP with its PC-relative DexCache access patch.
- const DexFile& dex_file = invoke->GetDexFile();
+ const DexFile& dex_file = invoke->GetDexFileForPcRelativeDexCache();
uint32_t element_offset = invoke->GetDexCacheArrayOffset();
vixl::aarch64::Label* adrp_label = NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
@@ -4087,11 +4106,20 @@ void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location te
__ Blr(lr);
}
+void LocationsBuilderARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeStringPatch(
const DexFile& dex_file,
- uint32_t string_index,
+ dex::StringIndex string_index,
vixl::aarch64::Label* adrp_label) {
- return NewPcRelativePatch(dex_file, string_index, adrp_label, &pc_relative_string_patches_);
+ return
+ NewPcRelativePatch(dex_file, string_index.index_, adrp_label, &pc_relative_string_patches_);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeTypePatch(
@@ -4101,6 +4129,13 @@ vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeTypePatch(
return NewPcRelativePatch(dex_file, type_index.index_, adrp_label, &pc_relative_type_patches_);
}
+vixl::aarch64::Label* CodeGeneratorARM64::NewBssEntryTypePatch(
+ const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ vixl::aarch64::Label* adrp_label) {
+ return NewPcRelativePatch(dex_file, type_index.index_, adrp_label, &type_bss_entry_patches_);
+}
+
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file,
uint32_t element_offset,
@@ -4144,16 +4179,18 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddres
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLiteral(
- const DexFile& dex_file, dex::StringIndex string_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
+ const DexFile& dex_file, dex::StringIndex string_index, Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
- const DexFile& dex_file, dex::TypeIndex type_index, uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+ const DexFile& dex_file, dex::TypeIndex type_index, Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -4206,6 +4243,7 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
pc_relative_type_patches_.size() +
+ type_bss_entry_patches_.size() +
boot_image_address_patches_.size();
linker_patches->reserve(size);
for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
@@ -4222,12 +4260,17 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
target_string.string_index.index_));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
vixl::aarch64::Literal<uint32_t>* literal = entry.second;
@@ -4235,13 +4278,12 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patc
target_type.dex_file,
target_type.type_index.index_));
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
- linker_patches);
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
vixl::aarch64::Literal<uint32_t>* literal = entry.second;
linker_patches->push_back(LinkerPatch::RecordPosition(literal->GetOffset()));
}
+ DCHECK_EQ(size, linker_patches->size());
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value,
@@ -4304,12 +4346,12 @@ HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
+ case HLoadClass::LoadKind::kBssEntry:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJitCompilation());
- break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -4317,15 +4359,16 @@ HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
}
void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
LocationFrom(calling_convention.GetRegisterAt(0)),
- LocationFrom(vixl::aarch64::x0),
- /* code_generator_supports_read_barrier */ true);
+ LocationFrom(vixl::aarch64::x0));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -4336,21 +4379,21 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(cls->GetLocations()->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
Location out_loc = cls->GetLocations()->Out();
Register out = OutputRegister(cls);
@@ -4359,7 +4402,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -4393,59 +4436,46 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK(cls->GetAddress() != 0u && IsUint<32>(cls->GetAddress()));
- __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(cls->GetAddress()));
- break;
- }
- case HLoadClass::LoadKind::kJitTableAddress: {
- __ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
- cls->GetTypeIndex(),
- cls->GetAddress()));
- GenerateGcRootFieldLoad(cls,
- out_loc,
- out.X(),
- /* offset */ 0,
- /* fixup_label */ nullptr,
- kCompilerReadBarrierOption);
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
+ __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- // Add ADRP with its PC-relative DexCache access patch.
+ case HLoadClass::LoadKind::kBssEntry: {
+ // Add ADRP with its PC-relative Class .bss entry patch.
const DexFile& dex_file = cls->GetDexFile();
- uint32_t element_offset = cls->GetDexCacheElementOffset();
- vixl::aarch64::Label* adrp_label =
- codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
+ dex::TypeIndex type_index = cls->GetTypeIndex();
+ vixl::aarch64::Label* adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
- // Add LDR with its PC-relative DexCache access patch.
+ // Add LDR with its PC-relative Class patch.
vixl::aarch64::Label* ldr_label =
- codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
+ codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
// /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
GenerateGcRootFieldLoad(cls,
- out_loc,
+ cls->GetLocations()->Out(),
out.X(),
- /* offset placeholder */ 0,
+ /* placeholder */ 0u,
ldr_label,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
+ kCompilerReadBarrierOption);
+ generate_null_check = true;
break;
}
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- MemberOffset resolved_types_offset =
- ArtMethod::DexCacheResolvedTypesOffset(kArm64PointerSize);
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- Register current_method = InputRegisterAt(cls, 0);
- __ Ldr(out.X(), MemOperand(current_method, resolved_types_offset.Int32Value()));
- // /* GcRoot<mirror::Class> */ out = out[type_index]
+ case HLoadClass::LoadKind::kJitTableAddress: {
+ __ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
+ cls->GetTypeIndex(),
+ cls->GetClass()));
GenerateGcRootFieldLoad(cls,
out_loc,
out.X(),
- CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_),
+ /* offset */ 0,
/* fixup_label */ nullptr,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
+ kCompilerReadBarrierOption);
break;
}
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -4500,11 +4530,11 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ break;
}
return desired_string_load_kind;
}
@@ -4534,7 +4564,9 @@ void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
}
}
-void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
Register out = OutputRegister(load);
Location out_loc = load->GetLocations()->Out();
@@ -4546,7 +4578,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
// Add ADRP with its PC-relative String patch.
const DexFile& dex_file = load->GetDexFile();
- uint32_t string_index = load->GetStringIndex().index_;
+ const dex::StringIndex string_index = load->GetStringIndex();
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
@@ -4557,14 +4589,16 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK(load->GetAddress() != 0u && IsUint<32>(load->GetAddress()));
- __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(load->GetAddress()));
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
+ __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBssEntry: {
// Add ADRP with its PC-relative String .bss entry patch.
const DexFile& dex_file = load->GetDexFile();
- uint32_t string_index = load->GetStringIndex().index_;
+ const dex::StringIndex string_index = load->GetStringIndex();
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
Register temp = temps.AcquireX();
@@ -4589,7 +4623,8 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
}
case HLoadString::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
+ load->GetStringIndex(),
+ load->GetString()));
GenerateGcRootFieldLoad(load,
out_loc,
out.X(),
@@ -4719,22 +4754,16 @@ void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
- locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetOut(LocationFrom(x0));
- locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations = instruction->GetLocations();
- InvokeRuntimeCallingConvention calling_convention;
- Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
- DCHECK(type_index.Is(w0));
- __ Mov(type_index, instruction->GetTypeIndex().index_);
// Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
+ codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -4745,7 +4774,6 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(LocationFrom(kArtMethodRegister));
} else {
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -4763,7 +4791,7 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction)
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
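The dropped temp and renumbered inputs in both VisitNewArray and VisitNewInstance follow from the entrypoint switch: the runtime no longer receives a type index plus the calling ArtMethod to resolve, it receives the class already resolved by a prior HLoadClass. A minimal sketch of the two calling shapes, assuming stand-in Class/ArtMethod types rather than the real runtime declarations:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Stand-ins for the runtime types; only the calling conventions matter here.
    struct Class {};
    struct ArtMethod {};

    // Old shape: the entrypoint received a type index plus the calling method
    // and had to resolve (and access-check) the class itself.
    void* AllocArrayWithAccessCheck(uint32_t type_idx, int32_t length, ArtMethod* referrer) {
      (void)type_idx; (void)referrer;
      return new char[static_cast<size_t>(length)];
    }

    // New shape: compiled code passes the already-resolved class, so the
    // runtime only has to allocate.
    void* AllocArrayResolved(Class* klass, int32_t length) {
      (void)klass;
      return new char[static_cast<size_t>(length)];
    }

    int main() {
      Class k;
      ArtMethod m;
      void* old_style = AllocArrayWithAccessCheck(42u, 8, &m);
      void* new_style = AllocArrayResolved(&k, 8);
      std::printf("%p %p\n", old_style, new_style);
      delete[] static_cast<char*>(old_style);
      delete[] static_cast<char*>(new_style);
    }

This is also why the class and the length now sit in the first two argument registers: there is no longer a type-index temporary to reserve ahead of them.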
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 69b1be4ac2..7d3c655b27 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -541,7 +541,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
// ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
// to the associated ADRP patch label).
vixl::aarch64::Label* NewPcRelativeStringPatch(const DexFile& dex_file,
- uint32_t string_index,
+ dex::StringIndex string_index,
vixl::aarch64::Label* adrp_label = nullptr);
// Add a new PC-relative type patch for an instruction and return the label
@@ -552,6 +552,14 @@ class CodeGeneratorARM64 : public CodeGenerator {
dex::TypeIndex type_index,
vixl::aarch64::Label* adrp_label = nullptr);
+ // Add a new .bss entry type patch for an instruction and return the label
+ // to be bound before the instruction. The instruction will be either the
+ // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
+ // to the associated ADRP patch label).
+ vixl::aarch64::Label* NewBssEntryTypePatch(const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ vixl::aarch64::Label* adrp_label = nullptr);
+
// Add a new PC-relative dex cache array patch for an instruction and return
// the label to be bound before the instruction. The instruction will be
// either the ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label`
@@ -568,10 +576,11 @@ class CodeGeneratorARM64 : public CodeGenerator {
dex::TypeIndex type_index);
vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index);
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address);
+ Handle<mirror::Class> handle);
void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg);
void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
@@ -744,8 +753,10 @@ class CodeGeneratorARM64 : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
TypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
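NewBssEntryTypePatch pairs an ADRP with an LDR, both patched later to address the type's .bss slot: ADRP materializes the 4 KiB page of the target PC-relatively, and the LDR immediate supplies the low 12 bits. A sketch of just that address arithmetic, with made-up addresses:

    #include <cassert>
    #include <cstdint>

    // Models ADRP: the result is the target's page, computed PC-relatively.
    uint64_t AdrpResult(uint64_t pc, uint64_t target) {
      int64_t page_delta =
          static_cast<int64_t>(target >> 12) - static_cast<int64_t>(pc >> 12);
      return ((pc >> 12) + page_delta) << 12;
    }

    int main() {
      uint64_t pc = 0x0000007712345678u;         // address of the ADRP itself
      uint64_t bss_entry = 0x00000077deadbf00u;  // hypothetical .bss slot
      uint64_t page = AdrpResult(pc, bss_entry);
      uint64_t full = page + (bss_entry & 0xfffu);  // the LDR's unsigned offset
      assert(full == bss_entry);
      return 0;
    }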
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 9f2720e46a..6c66f8f3ed 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -394,22 +394,23 @@ class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
public:
LoadClassSlowPathARMVIXL(HLoadClass* cls, HInstruction* at, uint32_t dex_pc, bool do_clinit)
- : SlowPathCodeARMVIXL(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeARMVIXL(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- __ Mov(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ Mov(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- arm_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ arm_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -423,6 +424,20 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
arm_codegen->Move32(locations->Out(), LocationFrom(r0));
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the .bss entry address in the fast path,
+ // so that we can avoid another calculation here.
+ UseScratchRegisterScope temps(down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ arm_codegen->EmitMovwMovtPlaceholder(labels, temp);
+ __ Str(OutputRegister(cls_), MemOperand(temp));
+ }
__ B(GetExitLabel());
}
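The store added after RestoreLiveRegisters is what makes kBssEntry self-healing: the first execution null-checks the slot, takes the slow path, and publishes the resolved class back into the .bss entry, so later executions stay on the fast path. A toy model of that slot discipline, assuming std::atomic in place of the runtime's own publication rules and a Resolve stand-in for kQuickInitializeType:

    #include <atomic>
    #include <cstdio>

    struct Class {};
    Class kClass;

    std::atomic<Class*> bss_slot{nullptr};  // the .bss entry

    Class* Resolve() { return &kClass; }    // stand-in for the runtime call

    Class* LoadClass() {
      Class* k = bss_slot.load(std::memory_order_acquire);
      if (k == nullptr) {                   // generate_null_check = true
        k = Resolve();                      // slow path: runtime resolution
        bss_slot.store(k, std::memory_order_release);  // the added Str/Sw
      }
      return k;
    }

    int main() {
      std::printf("first:  %p\n", static_cast<void*>(LoadClass()));  // slow path
      std::printf("second: %p\n", static_cast<void*>(LoadClass()));  // fast path
    }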
@@ -432,10 +447,6 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -454,7 +465,7 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = load->GetStringIndex().index_;
+ const dex::StringIndex string_index = load->GetStringIndex();
vixl32::Register out = OutputRegister(load);
vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
@@ -473,7 +484,7 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ Mov(entry_address, temp);
}
- __ Mov(calling_convention.GetRegisterAt(0), string_index);
+ __ Mov(calling_convention.GetRegisterAt(0), string_index.index_);
arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
@@ -1252,6 +1263,7 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
@@ -2445,6 +2457,14 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv
}
}
+void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -3948,7 +3968,6 @@ void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) {
} else {
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(LocationFrom(r0));
}
@@ -3970,7 +3989,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
@@ -3978,19 +3997,16 @@ void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetOut(LocationFrom(r0));
- locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
- InvokeRuntimeCallingConventionARMVIXL calling_convention;
- __ Mov(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex().index_);
// Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
+ codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
}
void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
@@ -5790,17 +5806,11 @@ HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kJitTableAddress:
- break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
- // We disable pc-relative load when there is an irreducible loop, as the optimization
- // is incompatible with it.
- // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- if (GetGraph()->HasIrreducibleLoops()) {
- return HLoadClass::LoadKind::kDexCacheViaMethod;
- }
+ break;
+ case HLoadClass::LoadKind::kJitTableAddress:
+ DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
@@ -5809,15 +5819,16 @@ HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
}
void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
LocationFrom(calling_convention.GetRegisterAt(0)),
- LocationFrom(r0),
- /* code_generator_supports_read_barrier */ true);
+ LocationFrom(r0));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -5828,24 +5839,23 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod ||
- load_kind == HLoadClass::LoadKind::kDexCachePcRelative) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
vixl32::Register out = OutputRegister(cls);
@@ -5853,7 +5863,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -5867,12 +5877,14 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ Ldr(out, codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
@@ -5881,43 +5893,31 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ GenerateGcRootFieldLoad(cls, out_loc, out, 0, kCompilerReadBarrierOption);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
- cls->GetAddress()));
+ cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- vixl32::Register base_reg = InputRegisterAt(cls, 0);
- HArmDexCacheArraysBase* base = cls->InputAt(0)->AsArmDexCacheArraysBase();
- int32_t offset = cls->GetDexCacheElementOffset() - base->GetElementOffset();
- // /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(cls, out_loc, base_reg, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- vixl32::Register current_method = InputRegisterAt(cls, 0);
- const int32_t resolved_types_offset =
- ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value();
- GetAssembler()->LoadFromOffset(kLoadWord, out, current_method, resolved_types_offset);
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- default:
- TODO_VIXL32(FATAL);
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
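On 32-bit ARM the PC-relative placeholder emitted by EmitMovwMovtPlaceholder is a MOVW/MOVT pair rather than an ADRP/LDR: MOVW writes the low 16 bits of the register, MOVT the high 16, and the patcher later fills both immediates. A sketch of just the bit assembly, with a toy address:

    #include <cassert>
    #include <cstdint>

    // MOVW rX, #lo then MOVT rX, #hi composes a full 32-bit value.
    uint32_t MovwMovt(uint16_t lo, uint16_t hi) {
      uint32_t r = lo;                                            // MOVW
      r = (r & 0x0000ffffu) | (static_cast<uint32_t>(hi) << 16);  // MOVT
      return r;
    }

    int main() {
      uint32_t addr = 0xdeadbf00u;  // hypothetical .bss entry address
      assert(MovwMovt(addr & 0xffffu, addr >> 16) == addr);
      return 0;
    }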
@@ -6022,7 +6022,9 @@ void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
}
}
-void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
vixl32::Register out = OutputRegister(load);
@@ -6037,13 +6039,14 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitMovwMovtPlaceholder(labels, out);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
return; // No dex cache slow path.
}
@@ -6051,7 +6054,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitMovwMovtPlaceholder(labels, temp);
GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
@@ -6063,7 +6066,8 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) {
}
case HLoadString::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
- load->GetStringIndex()));
+ load->GetStringIndex(),
+ load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
return;
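DeduplicateJitStringLiteral and DeduplicateJitClassLiteral now record the handle in the JIT roots table and then share one pattern: a single literal per (dex file, index) key, created on first request and reused on every later load of the same string or class. A self-contained sketch of that GetOrCreate pattern, with stand-in Key/Literal types:

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <utility>

    using Key = std::pair<const void*, uint32_t>;  // (dex file, string/type index)
    struct Literal { uint32_t placeholder = 0u; }; // patched with the root later

    std::map<Key, Literal*> jit_patches;

    Literal* GetOrCreate(const Key& key) {
      auto it = jit_patches.find(key);
      if (it != jit_patches.end()) return it->second;  // deduplicated
      Literal* lit = new Literal();
      jit_patches.emplace(key, lit);
      return lit;
    }

    int main() {
      const char* dex = "core.dex";
      Literal* a = GetOrCreate({dex, 7u});
      Literal* b = GetOrCreate({dex, 7u});
      std::printf("deduplicated: %s\n", a == b ? "yes" : "no");  // yes
      delete a;
    }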
@@ -7229,18 +7233,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruct
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
- HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
- // We disable pc-relative load when there is an irreducible loop, as the optimization
- // is incompatible with it.
- // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- if (GetGraph()->HasIrreducibleLoops() &&
- (dispatch_info.method_load_kind ==
- HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative)) {
- dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
- }
-
- return dispatch_info;
+ return desired_dispatch_info;
}
vixl32::Register CodeGeneratorARMVIXL::GetInvokeStaticOrDirectExtraParameter(
@@ -7400,8 +7393,8 @@ void CodeGeneratorARMVIXL::GenerateVirtualCall(HInvokeVirtual* invoke, Location
}
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeStringPatch(
- const DexFile& dex_file, uint32_t string_index) {
- return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeTypePatch(
@@ -7409,6 +7402,11 @@ CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeTy
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
}
+CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewTypeBssEntryPatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
+}
+
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
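NewTypeBssEntryPatch keeps the established shape: every New*Patch helper forwards to a shared NewPcRelativePatch and differs only in which deque collects the pending fixup. A sketch of that shared worker, assuming a simplified PatchInfo (the real one also carries label state):

    #include <cstdint>
    #include <cstdio>
    #include <deque>

    struct PatchInfo { const void* dex_file; uint32_t offset_or_index; };

    // One worker; the caller picks the list that categorizes the patch.
    PatchInfo* NewPcRelativePatch(const void* dex_file,
                                  uint32_t value,
                                  std::deque<PatchInfo>* patches) {
      patches->emplace_back(PatchInfo{dex_file, value});
      return &patches->back();  // deque growth keeps existing references valid
    }

    int main() {
      std::deque<PatchInfo> pc_relative_type_patches;  // boot image types
      std::deque<PatchInfo> type_bss_entry_patches;    // .bss entry types
      NewPcRelativePatch("core.dex", 11u, &pc_relative_type_patches);
      NewPcRelativePatch("core.dex", 11u, &type_bss_entry_patches);
      std::printf("%zu %zu\n", pc_relative_type_patches.size(),
                  type_bss_entry_patches.size());
    }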
@@ -7450,9 +7448,12 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateDexCacheAddressLiteral(uint3
return DeduplicateUint32Literal(address, &uint32_literals_);
}
-VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
+VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(
+ const DexFile& dex_file,
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() {
@@ -7462,8 +7463,9 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(const DexFi
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() {
@@ -7499,6 +7501,7 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
/* MOVW+MOVT for each entry */ 2u * pc_relative_string_patches_.size() +
boot_image_type_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * pc_relative_type_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
boot_image_address_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
@@ -7513,12 +7516,17 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
target_string.string_index.index_));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
VIXLUInt32Literal* literal = entry.second;
@@ -7528,8 +7536,6 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
target_type.dex_file,
target_type.type_index.index_));
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
- linker_patches);
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
VIXLUInt32Literal* literal = entry.second;
@@ -7537,6 +7543,7 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pa
uint32_t literal_offset = literal->GetLocation();
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
+ DCHECK_EQ(size, linker_patches->size());
}
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal(
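The new DCHECK_EQ(size, linker_patches->size()) closes the loop on the size computation above, which counts two linker patches for every MOVW/MOVT entry: if someone adds a patch kind to the emission but forgets the estimate, the assert fires instead of the vector silently reallocating. A sketch of the reserve-then-assert idiom with made-up counts:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      size_t movw_movt_pairs = 3;  // e.g. type_bss_entry_patches_.size()
      size_t literals = 2;         // e.g. boot_image_address_patches_.size()
      size_t size = 2u * movw_movt_pairs + literals;  // two fixups per pair

      std::vector<int> linker_patches;
      linker_patches.reserve(size);
      for (size_t i = 0; i < movw_movt_pairs; ++i) {
        linker_patches.push_back(0);  // MOVW fixup
        linker_patches.push_back(1);  // MOVT fixup
      }
      for (size_t i = 0; i < literals; ++i) {
        linker_patches.push_back(2);  // literal fixup
      }
      assert(size == linker_patches.size());  // mirrors the DCHECK_EQ
      std::printf("emitted %zu patches\n", linker_patches.size());
    }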
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index abeadd5d1b..8ae3b7dc39 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -563,8 +563,10 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
vixl::aarch32::Label add_pc_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
VIXLUInt32Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
@@ -574,10 +576,11 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
VIXLUInt32Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
VIXLUInt32Literal* DeduplicateDexCacheAddressLiteral(uint32_t address);
VIXLUInt32Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
- dex::StringIndex string_index);
+ dex::StringIndex string_index,
+ Handle<mirror::String> handle);
VIXLUInt32Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address);
+ Handle<mirror::Class> handle);
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
@@ -731,8 +734,10 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
TypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
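Across these headers a raw uint64_t address becomes Handle<mirror::String>/Handle<mirror::Class>: a handle names a GC-visible root slot rather than the object pointer itself, which is what the NO_THREAD_SAFETY_ANALYSIS annotations on the visitors are about. A minimal model of the indirection, with stand-in types only:

    #include <cstdio>

    struct Object { int data; };

    // A handle holds the address of a root slot, not the object. If a moving
    // collector rewrote the slot, Get() would follow it automatically; here
    // the object is known not to move, matching the comment in the visitors.
    struct Handle {
      Object** slot;
      Object* Get() const { return *slot; }
    };

    int main() {
      Object obj{42};
      Object* root = &obj;  // slot owned by some handle scope
      Handle h{&root};
      std::printf("%d\n", h.Get()->data);
      return 0;
    }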
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 01e0dac33e..76be74e921 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -213,23 +213,24 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCodeMIPS(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeMIPS(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- mips_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ mips_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -240,11 +241,26 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
- Primitive::Type type = at_->GetType();
+ Primitive::Type type = instruction_->GetType();
mips_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the .bss entry address in the fast path,
+ // so that we can avoid another calculation here.
+ bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
+ Register base = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
+ DCHECK_NE(out.AsRegister<Register>(), AT);
+ CodeGeneratorMIPS::PcRelativePatchInfo* info =
+ mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ mips_codegen->EmitPcRelativeAddressPlaceholder(info, TMP, base);
+ __ StoreToOffset(kStoreWord, out.AsRegister<Register>(), TMP, 0);
+ }
__ B(GetExitLabel());
}
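The R6/R2 split above exists because R6 addresses PC-relatively while R2 must materialize a base register, but both compose a 32-bit address from a high 16-bit part and a signed low 16-bit offset; the high part needs a +1 correction whenever the low half's sign bit is set. A sketch of that split, assuming nothing beyond the signed 16-bit immediate:

    #include <cassert>
    #include <cstdint>

    void Split(uint32_t addr, uint32_t* hi, int16_t* lo) {
      *lo = static_cast<int16_t>(addr & 0xffffu);  // may be negative
      // Subtract the sign-extended low half so hi<<16 + lo rebuilds addr.
      *hi = (addr - static_cast<uint32_t>(static_cast<int32_t>(*lo))) >> 16;
    }

    int main() {
      for (uint32_t addr : {0x12345678u, 0x1234cafeu, 0xffff8000u}) {
        uint32_t hi;
        int16_t lo;
        Split(addr, &hi, &lo);
        uint32_t rebuilt =
            (hi << 16) + static_cast<uint32_t>(static_cast<int32_t>(lo));
        assert(rebuilt == addr);
      }
      return 0;
    }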
@@ -254,10 +270,6 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -281,8 +293,8 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
InvokeRuntimeCallingConvention calling_convention;
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = load->GetStringIndex().index_;
- __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
+ const dex::StringIndex string_index = load->GetStringIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
Primitive::Type type = instruction_->GetType();
@@ -465,6 +477,7 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
clobbered_ra_(false) {
@@ -483,7 +496,8 @@ void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
// Adjust native pc offsets in stack maps.
for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
- uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
+ uint32_t old_position =
+ stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
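This fix-up runs after branch relaxation: the assembler may have widened instructions, so every recorded native PC is re-queried and can only move forward, which is exactly what the DCHECK_GE encodes. A toy model assuming a single hypothetical 4-byte expansion at offset 16:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Stand-in for __ GetAdjustedPosition(): everything past the expansion
    // point shifts forward by the growth amount.
    uint32_t GetAdjustedPosition(uint32_t old_pos) {
      return old_pos < 16 ? old_pos : old_pos + 4;
    }

    int main() {
      std::vector<uint32_t> native_pc_offsets = {0, 8, 16, 32};
      for (uint32_t& pos : native_pc_offsets) {
        uint32_t new_pos = GetAdjustedPosition(pos);
        assert(new_pos >= pos);  // mirrors DCHECK_GE(new_position, old_position)
        pos = new_pos;           // SetStackMapNativePcOffset(i, new_position)
      }
      return 0;
    }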
@@ -1007,6 +1021,7 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patch
pc_relative_dex_cache_patches_.size() +
pc_relative_string_patches_.size() +
pc_relative_type_patches_.size() +
+ type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
boot_image_type_patches_.size() +
boot_image_address_patches_.size();
@@ -1014,13 +1029,16 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patch
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
for (const auto& entry : boot_image_string_patches_) {
const StringReference& target_string = entry.first;
@@ -1047,11 +1065,12 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patch
uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
+ DCHECK_EQ(size, linker_patches->size());
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeStringPatch(
- const DexFile& dex_file, uint32_t string_index) {
- return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeTypePatch(
@@ -1059,6 +1078,11 @@ CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeTypePatc
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
}
+CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewTypeBssEntryPatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
+}
+
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -5154,6 +5178,14 @@ void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invo
}
}
+void LocationsBuilderMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
if (invoke->GetLocations()->Intrinsified()) {
IntrinsicCodeGeneratorMIPS intrinsic(codegen);
@@ -5186,14 +5218,14 @@ HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- fallback_load = false;
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
// TODO: implement.
fallback_load = true;
break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ fallback_load = false;
+ break;
}
if (fallback_load) {
desired_string_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
@@ -5222,15 +5254,13 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
+ case HLoadClass::LoadKind::kBssEntry:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
fallback_load = true;
break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJitCompilation());
- // TODO: Create as many MipsDexCacheArraysBase instructions as needed for methods
- // with irreducible loops.
- break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
fallback_load = false;
break;
@@ -5427,34 +5457,32 @@ void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
}
void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(V0),
- /* code_generator_supports_read_barrier */ false); // TODO: revisit this bool.
+ Location::RegisterLocation(V0));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadClass::LoadKind::kBootImageAddress:
+ case HLoadClass::LoadKind::kBssEntry:
if (codegen_->GetInstructionSetFeatures().IsR6()) {
break;
}
FALLTHROUGH_INTENDED;
- // We need an extra register for PC-relative dex cache accesses.
- case HLoadClass::LoadKind::kDexCachePcRelative:
case HLoadClass::LoadKind::kReferrersClass:
- case HLoadClass::LoadKind::kDexCacheViaMethod:
locations->SetInAt(0, Location::RequiresRegister());
break;
default:
@@ -5463,16 +5491,17 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
Register base_or_current_method_reg;
@@ -5480,12 +5509,11 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadClass::LoadKind::kBootImageAddress:
+ case HLoadClass::LoadKind::kBssEntry:
base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
break;
- // We need an extra register for PC-relative dex cache accesses.
- case HLoadClass::LoadKind::kDexCachePcRelative:
case HLoadClass::LoadKind::kReferrersClass:
case HLoadClass::LoadKind::kDexCacheViaMethod:
base_or_current_method_reg = locations->InAt(0).AsRegister<Register>();
@@ -5508,14 +5536,14 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
@@ -5523,38 +5551,29 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK(!kEmitCompilerReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
- case HLoadClass::LoadKind::kJitTableAddress: {
- LOG(FATAL) << "Unimplemented";
+ case HLoadClass::LoadKind::kBssEntry: {
+ CodeGeneratorMIPS::PcRelativePatchInfo* info =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
+ __ LoadFromOffset(kLoadWord, out, out, 0);
+ generate_null_check = true;
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- HMipsDexCacheArraysBase* base = cls->InputAt(0)->AsMipsDexCacheArraysBase();
- int32_t offset =
- cls->GetDexCacheElementOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset;
- // /* GcRoot<mirror::Class> */ out = *(dex_cache_arrays_base + offset)
- GenerateGcRootFieldLoad(cls, out_loc, base_or_current_method_reg, offset);
- generate_null_check = !cls->IsInDexCache();
+ case HLoadClass::LoadKind::kJitTableAddress: {
+ LOG(FATAL) << "Unimplemented";
break;
}
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- __ LoadFromOffset(kLoadWord,
- out,
- base_or_current_method_reg,
- ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value());
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset);
- generate_null_check = !cls->IsInDexCache();
- }
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -5625,7 +5644,9 @@ void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
}
}
-void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
HLoadString::LoadKind load_kind = load->GetLoadKind();
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
@@ -5647,6 +5668,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
@@ -5655,13 +5677,14 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageAddressLiteral(address));
@@ -5670,7 +5693,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholder(info, out, base_or_current_method_reg);
__ LoadFromOffset(kLoadWord, out, out, 0);
SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
@@ -5875,21 +5898,14 @@ void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
- InvokeRuntimeCallingConvention calling_convention;
- Register current_method_register = calling_convention.GetRegisterAt(2);
- __ Lw(current_method_register, SP, kCurrentMethodStackOffset);
- // Move an uint16_t value to a register.
- __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex().index_);
- codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
- void*, uint32_t, int32_t, ArtMethod*>();
+ codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
}
void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
@@ -5900,7 +5916,6 @@ void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -5917,7 +5932,7 @@ void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
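The CheckEntrypointTypes<...> lines updated throughout are compile-time guards: the enum-selected entrypoint must carry exactly the (return, arguments...) signature the call site assumes, so switching to kQuickAllocObjectWithChecks or kQuickAllocArrayResolved forces every caller to state the new shape. A sketch of the static_assert trick, assuming a hypothetical one-entry signature table:

    #include <cstdint>
    #include <type_traits>

    struct Class {};
    enum Entrypoint { kAllocArrayResolved };

    template <Entrypoint kEntry> struct Signature;
    template <> struct Signature<kAllocArrayResolved> {
      using type = void*(Class*, int32_t);
    };

    template <Entrypoint kEntry, typename R, typename... Args>
    void CheckEntrypointTypes() {
      static_assert(
          std::is_same<typename Signature<kEntry>::type, R(Args...)>::value,
          "entrypoint signature mismatch");
    }

    int main() {
      CheckEntrypointTypes<kAllocArrayResolved, void*, Class*, int32_t>();  // OK
      return 0;
    }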
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 7b0812cb7b..c8fd325999 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -452,8 +452,10 @@ class CodeGeneratorMIPS : public CodeGenerator {
MipsLabel pc_rel_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
@@ -504,8 +506,10 @@ class CodeGeneratorMIPS : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
BootTypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 36690c0569..192b4a5050 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -167,22 +167,23 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCodeMIPS64(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeMIPS64(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex().index_);
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType;
- mips64_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
+ mips64_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -193,11 +194,24 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
- Primitive::Type type = at_->GetType();
+ Primitive::Type type = instruction_->GetType();
mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ // TODO: Change art_quick_initialize_type/art_quick_initialize_static_storage to
+ // kSaveEverything and use a temporary for the .bss entry address in the fast path,
+ // so that we can avoid another calculation here.
+ DCHECK_NE(out.AsRegister<GpuRegister>(), AT);
+ CodeGeneratorMIPS64::PcRelativePatchInfo* info =
+ mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
+ mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info, AT);
+ __ Sw(out.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ }
__ Bc(GetExitLabel());
}
@@ -207,10 +221,6 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -234,8 +244,8 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
InvokeRuntimeCallingConvention calling_convention;
HLoadString* load = instruction_->AsLoadString();
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
- __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
instruction_->GetDexPc(),
@@ -422,6 +432,7 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
boot_image_type_patches_(TypeReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Save RA (containing the return address) to mimic Quick.
@@ -439,7 +450,8 @@ void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
// Adjust native pc offsets in stack maps.
for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
- uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
+ uint32_t old_position =
+ stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips64);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
@@ -922,6 +934,7 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
pc_relative_dex_cache_patches_.size() +
pc_relative_string_patches_.size() +
pc_relative_type_patches_.size() +
+ type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
boot_image_type_patches_.size() +
boot_image_address_patches_.size();
@@ -929,13 +942,16 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(pc_relative_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
linker_patches);
} else {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
linker_patches);
}
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
linker_patches);
for (const auto& entry : boot_image_string_patches_) {
const StringReference& target_string = entry.first;
@@ -962,11 +978,12 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
+ DCHECK_EQ(size, linker_patches->size());
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeStringPatch(
- const DexFile& dex_file, uint32_t string_index) {
- return NewPcRelativePatch(dex_file, string_index, &pc_relative_string_patches_);
+ const DexFile& dex_file, dex::StringIndex string_index) {
+ return NewPcRelativePatch(dex_file, string_index.index_, &pc_relative_string_patches_);
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeTypePatch(
@@ -974,6 +991,11 @@ CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeType
return NewPcRelativePatch(dex_file, type_index.index_, &pc_relative_type_patches_);
}
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewTypeBssEntryPatch(
+ const DexFile& dex_file, dex::TypeIndex type_index) {
+ return NewPcRelativePatch(dex_file, type_index.index_, &type_bss_entry_patches_);
+}
+
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeDexCacheArrayPatch(
const DexFile& dex_file, uint32_t element_offset) {
return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
@@ -3095,7 +3117,7 @@ void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(
Location root,
GpuRegister obj,
uint32_t offset) {
- // When handling HLoadClass::LoadKind::kDexCachePcRelative, the caller calls
+ // When handling PC-relative loads, the caller calls
// EmitPcRelativeAddressPlaceholderHigh() and then GenerateGcRootFieldLoad().
// The relative patcher expects the two methods to emit the following patchable
// sequence of instructions in this case:
@@ -3256,6 +3278,14 @@ void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* in
HandleInvoke(invoke);
}
+void LocationsBuilderMIPS64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
if (invoke->GetLocations()->Intrinsified()) {
IntrinsicCodeGeneratorMIPS64 intrinsic(codegen);
@@ -3314,14 +3344,14 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
+ case HLoadClass::LoadKind::kBssEntry:
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ break;
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
// TODO: implement.
fallback_load = true;
break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
- DCHECK(!Runtime::Current()->UseJitCompilation());
- break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -3366,7 +3396,7 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
uint32_t offset = invoke->GetDexCacheArrayOffset();
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset);
+ NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset);
EmitPcRelativeAddressPlaceholderHigh(info, AT);
__ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
break;
@@ -3474,38 +3504,38 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke)
}
void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- calling_convention.GetReturnLocation(Primitive::kPrimNot),
- /* code_generator_supports_read_barrier */ false);
+ calling_convention.GetReturnLocation(Primitive::kPrimNot));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || kEmitCompilerReadBarrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
GpuRegister out = out_loc.AsRegister<GpuRegister>();
GpuRegister current_method_reg = ZERO;
@@ -3526,14 +3556,14 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
ArtMethod::DeclaringClassOffset().Int32Value());
break;
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageTypeLiteral(cls->GetDexFile(),
cls->GetTypeIndex()));
break;
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
@@ -3542,39 +3572,29 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK(!kEmitCompilerReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
- case HLoadClass::LoadKind::kJitTableAddress: {
- LOG(FATAL) << "Unimplemented";
- break;
- }
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- uint32_t element_offset = cls->GetDexCacheElementOffset();
+ case HLoadClass::LoadKind::kBssEntry: {
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeDexCacheArrayPatch(cls->GetDexFile(), element_offset);
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
- // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(cls, out_loc, AT, /* placeholder */ 0x5678);
- generate_null_check = !cls->IsInDexCache();
+ __ Lwu(out, AT, /* placeholder */ 0x5678);
+ generate_null_check = true;
break;
}
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- __ LoadFromOffset(kLoadDoubleword,
- out,
- current_method_reg,
- ArtMethod::DexCacheResolvedTypesOffset(kMips64PointerSize).Int32Value());
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- size_t offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_);
- GenerateGcRootFieldLoad(cls, out_loc, out, offset);
- generate_null_check = !cls->IsInDexCache();
+ case HLoadClass::LoadKind::kJitTableAddress: {
+ LOG(FATAL) << "Unimplemented";
+ break;
}
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -3628,7 +3648,9 @@ void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
}
}
-void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
HLoadString::LoadKind load_kind = load->GetLoadKind();
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
@@ -3636,6 +3658,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress:
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
@@ -3644,14 +3667,15 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
__ Daddiu(out, AT, /* placeholder */ 0x5678);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageAddressLiteral(address));
@@ -3660,7 +3684,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex().index_);
+ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT);
__ Lwu(out, AT, /* placeholder */ 0x5678);
SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
@@ -3818,19 +3842,14 @@ void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations = instruction->GetLocations();
- // Move an uint16_t value to a register.
- __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(),
- instruction->GetTypeIndex().index_);
- codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
+ codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
}
void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
@@ -3841,7 +3860,6 @@ void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -3859,7 +3877,7 @@ void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction)
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
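
The MIPS64 hunks implement both halves of the kBssEntry protocol: the fast path is a single PC-relative Lwu from the linker-patched .bss slot followed by a null check, and the slow path resolves the type and stores it back through AT so later executions never leave the fast path. A sketch of that contract in plain C++, where bss_slot and ResolveTypeFromRuntime are hypothetical stand-ins for the patched slot and the kQuickInitializeType entrypoint:

#include <atomic>

struct Class {};

std::atomic<Class*> bss_slot{nullptr};  // stand-in for the linker-patched .bss entry

Class* ResolveTypeFromRuntime() {       // stub standing in for kQuickInitializeType
  static Class resolved;
  return &resolved;
}

Class* LoadClassBssEntry() {
  // Fast path: one PC-relative load (the Lwu through AT in VisitLoadClass).
  Class* klass = bss_slot.load(std::memory_order_acquire);
  if (klass == nullptr) {  // generate_null_check = true
    // Slow path: resolve, then publish to the slot (the Sw at the end of
    // LoadClassSlowPathMIPS64::EmitNativeCode) so later loads stay fast.
    klass = ResolveTypeFromRuntime();
    bss_slot.store(klass, std::memory_order_release);
  }
  return klass;
}

The write-back in the slow path is what makes the TODO above about kSaveEverything attractive: if the fast path kept the .bss entry address in a temporary, the slow path would not need to recompute it.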
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 8ac919f47e..52b780c106 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -411,8 +411,10 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
Mips64Label pc_rel_label;
};
- PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
+ PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
+ dex::StringIndex string_index);
PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
+ PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
uint32_t element_offset);
PcRelativePatchInfo* NewPcRelativeCallPatch(const DexFile& dex_file,
@@ -469,8 +471,10 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
BootTypeToLiteralMap boot_image_type_patches_;
- // PC-relative type patch info.
+ // PC-relative type patch info for kBootImageLinkTimePcRelative.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
+ // PC-relative type patch info for kBssEntry.
+ ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
// Deduplication map for patchable boot image addresses.
Uint32ToLiteralMap boot_image_address_patches_;
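
Both MIPS headers also tighten NewPcRelativeStringPatch from a raw uint32_t to dex::StringIndex, so a string index can no longer be passed silently where a type index or an element offset is expected. A sketch of that wrapper pattern, simplified from the real dex:: types:

#include <cstdint>

namespace dex {
// Simplified index wrappers: explicit construction blocks silent conversions
// between raw integers and the different index kinds.
struct StringIndex {
  uint32_t index_;
  explicit StringIndex(uint32_t index) : index_(index) {}
};
struct TypeIndex {
  uint32_t index_;
  explicit TypeIndex(uint32_t index) : index_(index) {}
};
}  // namespace dex

// Overloads are now distinguished by index kind; the raw value is unwrapped
// with .index_ only at the point a numeric payload is needed, as the hunks do.
void NewPcRelativeStringPatch(dex::StringIndex string_index);
void NewPcRelativeTypePatch(dex::TypeIndex type_index);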
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 0abe85540c..853c91fac8 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -225,8 +225,8 @@ class LoadStringSlowPathX86 : public SlowPathCode {
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
- __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index));
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index.index_));
x86_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
@@ -254,21 +254,24 @@ class LoadClassSlowPathX86 : public SlowPathCode {
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCode(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex().index_));
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(type_index.index_));
x86_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage
: kQuickInitializeType,
- at_, dex_pc_, this);
+ instruction_,
+ dex_pc_,
+ this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -281,8 +284,17 @@ class LoadClassSlowPathX86 : public SlowPathCode {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
x86_codegen->Move32(out, Location::RegisterLocation(EAX));
}
-
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ Register method_address = locations->InAt(0).AsRegister<Register>();
+ __ movl(Address(method_address, CodeGeneratorX86::kDummy32BitOffset),
+ locations->Out().AsRegister<Register>());
+ Label* fixup_label = x86_codegen->NewTypeBssEntryPatch(cls_);
+ __ Bind(fixup_label);
+ }
__ jmp(GetExitLabel());
}
@@ -292,10 +304,6 @@ class LoadClassSlowPathX86 : public SlowPathCode {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -1009,7 +1017,8 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
constant_area_start_(-1),
@@ -2244,6 +2253,14 @@ void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke)
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderX86::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
void LocationsBuilderX86::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -4150,7 +4167,6 @@ void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
} else {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
}
@@ -4166,7 +4182,7 @@ void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
DCHECK(!codegen_->IsLeafMethod());
}
}
@@ -4176,18 +4192,15 @@ void LocationsBuilderX86::VisitNewArray(HNewArray* instruction) {
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
locations->SetOut(Location::RegisterLocation(EAX));
InvokeRuntimeCallingConvention calling_convention;
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
- InvokeRuntimeCallingConvention calling_convention;
- __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex().index_));
// Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
+ codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
}
@@ -4505,7 +4518,7 @@ Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticO
__ movl(temp.AsRegister<Register>(), Address(base_reg, kDummy32BitOffset));
// Bind a new fixup label at the end of the "movl" insn.
uint32_t offset = invoke->GetDexCacheArrayOffset();
- __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset));
+ __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
@@ -4594,9 +4607,15 @@ void CodeGeneratorX86::RecordBootStringPatch(HLoadString* load_string) {
__ Bind(&string_patches_.back().label);
}
-void CodeGeneratorX86::RecordTypePatch(HLoadClass* load_class) {
- type_patches_.emplace_back(load_class->GetDexFile(), load_class->GetTypeIndex().index_);
- __ Bind(&type_patches_.back().label);
+void CodeGeneratorX86::RecordBootTypePatch(HLoadClass* load_class) {
+ boot_image_type_patches_.emplace_back(load_class->GetDexFile(),
+ load_class->GetTypeIndex().index_);
+ __ Bind(&boot_image_type_patches_.back().label);
+}
+
+Label* CodeGeneratorX86::NewTypeBssEntryPatch(HLoadClass* load_class) {
+ type_bss_entry_patches_.emplace_back(load_class->GetDexFile(), load_class->GetTypeIndex().index_);
+ return &type_bss_entry_patches_.back().label;
}
Label* CodeGeneratorX86::NewStringBssEntryPatch(HLoadString* load_string) {
@@ -4633,7 +4652,8 @@ void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
pc_relative_dex_cache_patches_.size() +
simple_patches_.size() +
string_patches_.size() +
- type_patches_.size();
+ boot_image_type_patches_.size() +
+ type_bss_entry_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
@@ -4642,24 +4662,26 @@ void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patche
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(boot_image_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
} else if (GetCompilerOptions().GetCompilePic()) {
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(boot_image_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(string_patches_, linker_patches);
} else {
+ for (const PatchInfo<Label>& info : boot_image_type_patches_) {
+ uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
+ linker_patches->push_back(LinkerPatch::TypePatch(literal_offset, &info.dex_file, info.index));
+ }
for (const PatchInfo<Label>& info : string_patches_) {
uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
linker_patches->push_back(
LinkerPatch::StringPatch(literal_offset, &info.dex_file, info.index));
}
}
- if (GetCompilerOptions().GetCompilePic()) {
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(type_patches_, linker_patches);
- } else {
- for (const PatchInfo<Label>& info : type_patches_) {
- uint32_t literal_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- linker_patches->push_back(LinkerPatch::TypePatch(literal_offset, &info.dex_file, info.index));
- }
- }
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
+ DCHECK_EQ(size, linker_patches->size());
}
void CodeGeneratorX86::MarkGCCard(Register temp,
@@ -5978,7 +6000,7 @@ HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
DCHECK(GetCompilerOptions().GetCompilePic());
FALLTHROUGH_INTENDED;
- case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation()); // Note: boot image is also non-JIT.
// We disable pc-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
@@ -6000,15 +6022,16 @@ HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
}
void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(EAX),
- /* code_generator_supports_read_barrier */ true);
+ Location::RegisterLocation(EAX));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -6019,11 +6042,9 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod ||
load_kind == HLoadClass::LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == HLoadClass::LoadKind::kDexCachePcRelative) {
+ load_kind == HLoadClass::LoadKind::kBssEntry) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
@@ -6031,23 +6052,26 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
dex::TypeIndex dex_index,
- uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), address);
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
// Add a patch entry and return the label.
jit_class_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_class_patches_.back();
return &info->label;
}
-void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
@@ -6055,7 +6079,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -6070,63 +6094,48 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ movl(out, Immediate(/* placeholder */ 0));
- codegen_->RecordTypePatch(cls);
+ codegen_->RecordBootTypePatch(cls);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
Register method_address = locations->InAt(0).AsRegister<Register>();
__ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
- codegen_->RecordTypePatch(cls);
+ codegen_->RecordBootTypePatch(cls);
break;
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ movl(out, Immediate(address));
codegen_->RecordSimplePatch();
break;
}
+ case HLoadClass::LoadKind::kBssEntry: {
+ Register method_address = locations->InAt(0).AsRegister<Register>();
+ Address address(method_address, CodeGeneratorX86::kDummy32BitOffset);
+ Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
+ generate_null_check = true;
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset);
Label* fixup_label = codegen_->NewJitRootClassPatch(
- cls->GetDexFile(), cls->GetTypeIndex(), cls->GetAddress());
+ cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
// /* GcRoot<mirror::Class> */ out = *address
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
break;
}
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- Register base_reg = locations->InAt(0).AsRegister<Register>();
- uint32_t offset = cls->GetDexCacheElementOffset();
- Label* fixup_label = codegen_->NewPcRelativeDexCacheArrayPatch(cls->GetDexFile(), offset);
- // /* GcRoot<mirror::Class> */ out = *(base + offset) /* PC-relative */
- GenerateGcRootFieldLoad(cls,
- out_loc,
- Address(base_reg, CodeGeneratorX86::kDummy32BitOffset),
- fixup_label,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- Register current_method = locations->InAt(0).AsRegister<Register>();
- __ movl(out, Address(current_method,
- ArtMethod::DexCacheResolvedTypesOffset(kX86PointerSize).Int32Value()));
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- GenerateGcRootFieldLoad(cls,
- out_loc,
- Address(out,
- CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_)),
- /* fixup_label */ nullptr,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
- break;
- }
+ case HLoadClass::LoadKind::kDexCacheViaMethod:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
}
if (generate_null_check || cls->MustGenerateClinitCheck()) {
@@ -6196,11 +6205,11 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind(
break;
case HLoadString::LoadKind::kBootImageAddress:
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ break;
}
return desired_string_load_kind;
}
@@ -6232,34 +6241,41 @@ void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
}
Label* CodeGeneratorX86::NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u);
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(
+ StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
// Add a patch entry and return the label.
jit_string_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_string_patches_.back();
return &info->label;
}
-void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimeAddress: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ movl(out, Immediate(/* placeholder */ 0));
codegen_->RecordBootStringPatch(load);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
Register method_address = locations->InAt(0).AsRegister<Register>();
__ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
codegen_->RecordBootStringPatch(load);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ movl(out, Immediate(address));
codegen_->RecordSimplePatch();
return; // No dex cache slow path.
@@ -6280,7 +6296,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset);
Label* fixup_label = codegen_->NewJitRootStringPatch(
- load->GetDexFile(), load->GetStringIndex());
+ load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
return;
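
On x86 the PC-relative placeholders work by emitting an instruction whose last four bytes are kDummy32BitOffset, binding a Label at the end of that instruction, and recording it; at link time the patcher rewrites the four bytes just before the label position, which is what the info.label.Position() - kLabelPositionToLiteralOffsetAdjustment arithmetic above computes. A toy model of that mechanism, with the assembler and encoding reduced to a byte buffer:

#include <cstdint>
#include <cstring>
#include <vector>

// Toy assembler: real x86 encodings are irrelevant here, only the
// label-and-patch arithmetic is modelled.
struct Label { size_t position = 0; };

struct ToyAssembler {
  static constexpr uint32_t kDummy32BitOffset = 0x12345678;
  static constexpr size_t kAdjustment = 4;  // cf. kLabelPositionToLiteralOffsetAdjustment
  std::vector<uint8_t> code;

  // Emit an opcode byte followed by a placeholder 32-bit displacement, then
  // return a label bound at the end of the instruction, as
  // NewTypeBssEntryPatch and friends do with __ Bind(fixup_label).
  Label EmitWithPlaceholder(uint8_t opcode) {
    code.push_back(opcode);
    uint32_t dummy = kDummy32BitOffset;
    const uint8_t* bytes = reinterpret_cast<const uint8_t*>(&dummy);
    code.insert(code.end(), bytes, bytes + sizeof(dummy));
    return Label{code.size()};  // bound just after the displacement
  }

  // What the linker later does with the recorded patch: overwrite the four
  // bytes ending at the label with the resolved offset.
  void Patch(const Label& fixup, uint32_t resolved_offset) {
    std::memcpy(&code[fixup.position - kAdjustment], &resolved_offset,
                sizeof(resolved_offset));
  }
};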
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 1af685087c..9eb97658da 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -412,11 +412,16 @@ class CodeGeneratorX86 : public CodeGenerator {
void RecordSimplePatch();
void RecordBootStringPatch(HLoadString* load_string);
- void RecordTypePatch(HLoadClass* load_class);
+ void RecordBootTypePatch(HLoadClass* load_class);
+ Label* NewTypeBssEntryPatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
- Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
- Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
+ Label* NewJitRootStringPatch(const DexFile& dex_file,
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle);
+ Label* NewJitRootClassPatch(const DexFile& dex_file,
+ dex::TypeIndex dex_index,
+ Handle<mirror::Class> handle);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
@@ -619,8 +624,10 @@ class CodeGeneratorX86 : public CodeGenerator {
ArenaDeque<Label> simple_patches_;
// String patch locations; type depends on configuration (app .bss or boot image PIC/non-PIC).
ArenaDeque<PatchInfo<Label>> string_patches_;
- // Type patch locations.
- ArenaDeque<PatchInfo<Label>> type_patches_;
+ // Type patch locations for boot image; type depends on configuration (boot image PIC/non-PIC).
+ ArenaDeque<PatchInfo<Label>> boot_image_type_patches_;
+ // Type patch locations for kBssEntry.
+ ArenaDeque<PatchInfo<Label>> type_bss_entry_patches_;
// Patches for string root accesses in JIT compiled code.
ArenaDeque<PatchInfo<Label>> jit_string_patches_;
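
The x86 header changes carry the JIT side of the same cleanup: NewJitRootStringPatch and NewJitRootClassPatch now take a Handle<> instead of a raw object address, and the .cc hunks store reinterpret_cast64<uint64_t>(handle.GetReference()), that is, the address of the handle's slot rather than of the object itself, so a moving collector can relocate the class or string without invalidating the recorded root. A loose model of that bookkeeping (Handle, TypeReference and the map are simplified placeholders):

#include <cstdint>
#include <map>
#include <string>
#include <tuple>

struct Class {};

template <typename T>
struct Handle {            // loose placeholder for art::Handle<T>
  T** reference_;          // slot the GC updates when the object moves
  T** GetReference() const { return reference_; }
};

struct TypeReference {
  std::string dex_file;
  uint32_t type_index;
  bool operator<(const TypeReference& other) const {
    return std::tie(dex_file, type_index) <
           std::tie(other.dex_file, other.type_index);
  }
};

std::map<TypeReference, uint64_t> jit_class_roots_;

// Mirrors NewJitRootClassPatch: record the handle slot's address, not the
// object's, so the entry stays valid across GC-driven moves.
void RecordJitClassRoot(const TypeReference& ref, Handle<Class> handle) {
  jit_class_roots_[ref] = reinterpret_cast<uint64_t>(handle.GetReference());
}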
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 903844fcdb..74c71cce1f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -234,12 +234,12 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCode(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = at_->GetLocations();
+ LocationSummary* locations = instruction_->GetLocations();
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
@@ -249,7 +249,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)),
Immediate(cls_->GetTypeIndex().index_));
x86_64_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage : kQuickInitializeType,
- at_,
+ instruction_,
dex_pc_,
this);
if (do_clinit_) {
@@ -266,6 +266,15 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
}
RestoreLiveRegisters(codegen, locations);
+ // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
+ if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
+ DCHECK(out.IsValid());
+ __ movl(Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false),
+ locations->Out().AsRegister<CpuRegister>());
+ Label* fixup_label = x86_64_codegen->NewTypeBssEntryPatch(cls_);
+ __ Bind(fixup_label);
+ }
__ jmp(GetExitLabel());
}
@@ -275,10 +284,6 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
// The class this slow path will load.
HLoadClass* const cls_;
- // The instruction where this slow path is happening.
- // (Might be the load class or an initialization check).
- HInstruction* const at_;
-
// The dex PC of `at_`.
const uint32_t dex_pc_;
@@ -300,9 +305,9 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex().index_;
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
// Custom calling convention: RAX serves as both input and output.
- __ movl(CpuRegister(RAX), Immediate(string_index));
+ __ movl(CpuRegister(RAX), Immediate(string_index.index_));
x86_64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
instruction_->GetDexPc(),
@@ -986,7 +991,7 @@ Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStat
Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
// Bind a new fixup label at the end of the "movl" insn.
uint32_t offset = invoke->GetDexCacheArrayOffset();
- __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset));
+ __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
@@ -1079,9 +1084,15 @@ void CodeGeneratorX86_64::RecordBootStringPatch(HLoadString* load_string) {
__ Bind(&string_patches_.back().label);
}
-void CodeGeneratorX86_64::RecordTypePatch(HLoadClass* load_class) {
- type_patches_.emplace_back(load_class->GetDexFile(), load_class->GetTypeIndex().index_);
- __ Bind(&type_patches_.back().label);
+void CodeGeneratorX86_64::RecordBootTypePatch(HLoadClass* load_class) {
+ boot_image_type_patches_.emplace_back(load_class->GetDexFile(),
+ load_class->GetTypeIndex().index_);
+ __ Bind(&boot_image_type_patches_.back().label);
+}
+
+Label* CodeGeneratorX86_64::NewTypeBssEntryPatch(HLoadClass* load_class) {
+ type_bss_entry_patches_.emplace_back(load_class->GetDexFile(), load_class->GetTypeIndex().index_);
+ return &type_bss_entry_patches_.back().label;
}
Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) {
@@ -1118,7 +1129,8 @@ void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
pc_relative_dex_cache_patches_.size() +
simple_patches_.size() +
string_patches_.size() +
- type_patches_.size();
+ boot_image_type_patches_.size() +
+ type_bss_entry_patches_.size();
linker_patches->reserve(size);
EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
linker_patches);
@@ -1127,13 +1139,17 @@ void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_pat
linker_patches->push_back(LinkerPatch::RecordPosition(literal_offset));
}
if (!GetCompilerOptions().IsBootImage()) {
+ DCHECK(boot_image_type_patches_.empty());
EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(string_patches_, linker_patches);
} else {
- // These are always PC-relative, see GetSupportedLoadStringKind().
+ // These are always PC-relative, see GetSupportedLoadClassKind()/GetSupportedLoadStringKind().
+ EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(boot_image_type_patches_,
+ linker_patches);
EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(string_patches_, linker_patches);
}
- // These are always PC-relative, see GetSupportedLoadClassKind().
- EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(type_patches_, linker_patches);
+ EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
+ linker_patches);
+ DCHECK_EQ(size, linker_patches->size());
}
void CodeGeneratorX86_64::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -1214,7 +1230,8 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
simple_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
@@ -2423,6 +2440,14 @@ void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invo
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
+void LocationsBuilderX86_64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86_64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
+ codegen_->GenerateInvokePolymorphicCall(invoke);
+}
+
void LocationsBuilderX86_64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -4038,7 +4063,6 @@ void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(Location::RegisterLocation(RAX));
}
@@ -4055,7 +4079,7 @@ void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction)
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
DCHECK(!codegen_->IsLeafMethod());
}
}
@@ -4064,21 +4088,16 @@ void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(RAX));
- locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
- InvokeRuntimeCallingConvention calling_convention;
- codegen_->Load64BitValue(CpuRegister(calling_convention.GetRegisterAt(0)),
- instruction->GetTypeIndex().index_);
// Note: if heap poisoning is enabled, the entry point takes care
// of poisoning the reference.
- codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
-
+ codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
}
@@ -5417,11 +5436,12 @@ HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kJitTableAddress:
- break;
- case HLoadClass::LoadKind::kDexCachePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kJitTableAddress:
+ DCHECK(Runtime::Current()->UseJitCompilation());
+ break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
break;
}
@@ -5429,15 +5449,16 @@ HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
}
void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
- if (cls->NeedsAccessCheck()) {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
InvokeRuntimeCallingConvention calling_convention;
- CodeGenerator::CreateLoadClassLocationSummary(
+ CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(
cls,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Location::RegisterLocation(RAX),
- /* code_generator_supports_read_barrier */ true);
+ Location::RegisterLocation(RAX));
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
@@ -5448,9 +5469,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
- HLoadClass::LoadKind load_kind = cls->GetLoadKind();
- if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
- load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
locations->SetInAt(0, Location::RequiresRegister());
}
locations->SetOut(Location::RequiresRegister());
@@ -5458,23 +5477,26 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
Label* CodeGeneratorX86_64::NewJitRootClassPatch(const DexFile& dex_file,
dex::TypeIndex dex_index,
- uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), address);
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(
+ TypeReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
// Add a patch entry and return the label.
jit_class_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_class_patches_.back();
return &info->label;
}
-void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
- LocationSummary* locations = cls->GetLocations();
- if (cls->NeedsAccessCheck()) {
- codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
- codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
+ HLoadClass::LoadKind load_kind = cls->GetLoadKind();
+ if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
+ codegen_->GenerateLoadClassRuntimeCall(cls);
return;
}
+ DCHECK(!cls->NeedsAccessCheck());
+ LocationSummary* locations = cls->GetLocations();
Location out_loc = locations->Out();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
@@ -5482,7 +5504,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- switch (cls->GetLoadKind()) {
+ switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
@@ -5497,52 +5519,36 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
__ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
- codegen_->RecordTypePatch(cls);
+ codegen_->RecordBootTypePatch(cls);
break;
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ movl(out, Immediate(address)); // Zero-extended.
codegen_->RecordSimplePatch();
break;
}
- case HLoadClass::LoadKind::kJitTableAddress: {
- Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ true);
- Label* fixup_label =
- codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetAddress());
- // /* GcRoot<mirror::Class> */ out = *address
- GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
- break;
- }
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- uint32_t offset = cls->GetDexCacheElementOffset();
- Label* fixup_label = codegen_->NewPcRelativeDexCacheArrayPatch(cls->GetDexFile(), offset);
+ case HLoadClass::LoadKind::kBssEntry: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
/* no_rip */ false);
+ Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
+ generate_null_check = true;
break;
}
- case HLoadClass::LoadKind::kDexCacheViaMethod: {
- // /* GcRoot<mirror::Class>[] */ out =
- // current_method.ptr_sized_fields_->dex_cache_resolved_types_
- CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
- __ movq(out,
- Address(current_method,
- ArtMethod::DexCacheResolvedTypesOffset(kX86_64PointerSize).Int32Value()));
- // /* GcRoot<mirror::Class> */ out = out[type_index]
- GenerateGcRootFieldLoad(
- cls,
- out_loc,
- Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex().index_)),
- /* fixup_label */ nullptr,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
+ case HLoadClass::LoadKind::kJitTableAddress: {
+ Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
+ /* no_rip */ true);
+ Label* fixup_label =
+ codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
+ // /* GcRoot<mirror::Class> */ out = *address
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
break;
}
default:
@@ -5600,11 +5606,11 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kDexCacheViaMethod:
- break;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ break;
}
return desired_string_load_kind;
}
@@ -5631,28 +5637,34 @@ void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
}
Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file,
- dex::StringIndex dex_index) {
- jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u);
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle) {
+ jit_string_roots_.Overwrite(
+ StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
// Add a patch entry and return the label.
jit_string_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_string_patches_.back();
return &info->label;
}
-void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
LocationSummary* locations = load->GetLocations();
Location out_loc = locations->Out();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
+ DCHECK(codegen_->GetCompilerOptions().IsBootImage());
__ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
codegen_->RecordBootStringPatch(load);
return; // No dex cache slow path.
}
case HLoadString::LoadKind::kBootImageAddress: {
- DCHECK_NE(load->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(load->GetString().Get()));
+ DCHECK_NE(address, 0u);
__ movl(out, Immediate(address)); // Zero-extended.
codegen_->RecordSimplePatch();
return; // No dex cache slow path.
@@ -5673,8 +5685,8 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
case HLoadString::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
/* no_rip */ true);
- Label* fixup_label =
- codegen_->NewJitRootStringPatch(load->GetDexFile(), load->GetStringIndex());
+ Label* fixup_label = codegen_->NewJitRootStringPatch(
+ load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
return;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index f827e79a94..3438b8159f 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -409,11 +409,16 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void RecordSimplePatch();
void RecordBootStringPatch(HLoadString* load_string);
- void RecordTypePatch(HLoadClass* load_class);
+ void RecordBootTypePatch(HLoadClass* load_class);
+ Label* NewTypeBssEntryPatch(HLoadClass* load_class);
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
- Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
- Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
+ Label* NewJitRootStringPatch(const DexFile& dex_file,
+ dex::StringIndex dex_index,
+ Handle<mirror::String> handle);
+ Label* NewJitRootClassPatch(const DexFile& dex_file,
+ dex::TypeIndex dex_index,
+ Handle<mirror::Class> handle);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
@@ -602,8 +607,10 @@ class CodeGeneratorX86_64 : public CodeGenerator {
ArenaDeque<Label> simple_patches_;
// String patch locations; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PatchInfo<Label>> string_patches_;
- // Type patch locations.
- ArenaDeque<PatchInfo<Label>> type_patches_;
+ // Type patch locations for boot image (always PIC).
+ ArenaDeque<PatchInfo<Label>> boot_image_type_patches_;
+ // Type patch locations for kBssEntry.
+ ArenaDeque<PatchInfo<Label>> type_bss_entry_patches_;
// Fixups for jump tables need to be handled specially.
ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.cc b/compiler/optimizing/dex_cache_array_fixups_arm.cc
index 10a36c6ff4..cfcb276a98 100644
--- a/compiler/optimizing/dex_cache_array_fixups_arm.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.cc
@@ -59,29 +59,15 @@ class DexCacheArrayFixupsVisitor : public HGraphVisitor {
}
private:
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
- // If this is a load with PC-relative access to the dex cache types array,
- // we need to add the dex cache arrays base as the special input.
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCachePcRelative) {
- // Initialize base for target dex file if needed.
- const DexFile& dex_file = load_class->GetDexFile();
- HArmDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(dex_file);
- // Update the element offset in base.
- DexCacheArraysLayout layout(kArmPointerSize, &dex_file);
- base->UpdateElementOffset(layout.TypeOffset(load_class->GetTypeIndex()));
- // Add the special argument base to the load.
- load_class->AddSpecialInput(base);
- }
- }
-
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
// If this is an invoke with PC-relative access to the dex cache methods array,
// we need to add the dex cache arrays base as the special input.
if (invoke->HasPcRelativeDexCache() &&
!IsCallFreeIntrinsic<IntrinsicLocationsBuilderARMType>(invoke, codegen_)) {
- HArmDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(invoke->GetDexFile());
+ HArmDexCacheArraysBase* base =
+ GetOrCreateDexCacheArrayBase(invoke, invoke->GetDexFileForPcRelativeDexCache());
// Update the element offset in base.
- DexCacheArraysLayout layout(kArmPointerSize, &invoke->GetDexFile());
+ DexCacheArraysLayout layout(kArmPointerSize, &invoke->GetDexFileForPcRelativeDexCache());
base->UpdateElementOffset(layout.MethodOffset(invoke->GetDexMethodIndex()));
// Add the special argument base to the method.
DCHECK(!invoke->HasCurrentMethodInput());
@@ -89,21 +75,28 @@ class DexCacheArrayFixupsVisitor : public HGraphVisitor {
}
}
- HArmDexCacheArraysBase* GetOrCreateDexCacheArrayBase(const DexFile& dex_file) {
- // Ensure we only initialize the pointer once for each dex file.
- auto lb = dex_cache_array_bases_.lower_bound(&dex_file);
- if (lb != dex_cache_array_bases_.end() &&
- !dex_cache_array_bases_.key_comp()(&dex_file, lb->first)) {
- return lb->second;
- }
+ HArmDexCacheArraysBase* GetOrCreateDexCacheArrayBase(HInstruction* cursor,
+ const DexFile& dex_file) {
+ if (GetGraph()->HasIrreducibleLoops()) {
+ HArmDexCacheArraysBase* base = new (GetGraph()->GetArena()) HArmDexCacheArraysBase(dex_file);
+ cursor->GetBlock()->InsertInstructionBefore(base, cursor);
+ return base;
+ } else {
+ // Ensure we only initialize the pointer once for each dex file.
+ auto lb = dex_cache_array_bases_.lower_bound(&dex_file);
+ if (lb != dex_cache_array_bases_.end() &&
+ !dex_cache_array_bases_.key_comp()(&dex_file, lb->first)) {
+ return lb->second;
+ }
- // Insert the base at the start of the entry block, move it to a better
- // position later in MoveBaseIfNeeded().
- HArmDexCacheArraysBase* base = new (GetGraph()->GetArena()) HArmDexCacheArraysBase(dex_file);
- HBasicBlock* entry_block = GetGraph()->GetEntryBlock();
- entry_block->InsertInstructionBefore(base, entry_block->GetFirstInstruction());
- dex_cache_array_bases_.PutBefore(lb, &dex_file, base);
- return base;
+ // Insert the base at the start of the entry block, move it to a better
+ // position later in MoveBaseIfNeeded().
+ HArmDexCacheArraysBase* base = new (GetGraph()->GetArena()) HArmDexCacheArraysBase(dex_file);
+ HBasicBlock* entry_block = GetGraph()->GetEntryBlock();
+ entry_block->InsertInstructionBefore(base, entry_block->GetFirstInstruction());
+ dex_cache_array_bases_.PutBefore(lb, &dex_file, base);
+ return base;
+ }
}
CodeGeneratorARMType* codegen_;
@@ -114,11 +107,6 @@ class DexCacheArrayFixupsVisitor : public HGraphVisitor {
};
void DexCacheArrayFixups::Run() {
- if (graph_->HasIrreducibleLoops()) {
- // Do not run this optimization, as irreducible loops do not work with an instruction
- // that can be live-in at the irreducible loop header.
- return;
- }
DexCacheArrayFixupsVisitor visitor(graph_, codegen_);
visitor.VisitInsertionOrder();
visitor.MoveBasesIfNeeded();
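
Two things changed in this file: the pass now runs even with irreducible loops (materializing a fresh base right before each use instead of hoisting one that could be live-in at an irreducible header), and the cached path keeps the lower_bound idiom that probes the map only once per lookup-or-insert. A small self-contained illustration of that idiom, with ints standing in for HArmDexCacheArraysBase:

    #include <assert.h>
    #include <map>

    // Toy version of the per-dex-file caching above: lower_bound yields both
    // the lookup result and the insertion hint, so the map is probed once.
    static int* GetOrCreateBase(std::map<const void*, int*>& bases, const void* dex_file) {
      auto lb = bases.lower_bound(dex_file);
      if (lb != bases.end() && !bases.key_comp()(dex_file, lb->first)) {
        return lb->second;  // already initialized for this dex file.
      }
      static int storage[16];
      static int next = 0;
      int* base = &storage[next++];
      bases.insert(lb, {dex_file, base});  // hinted insert, amortized O(1).
      return base;
    }

    int main() {
      std::map<const void*, int*> bases;
      int file_a, file_b;
      int* a1 = GetOrCreateBase(bases, &file_a);
      int* a2 = GetOrCreateBase(bases, &file_a);
      int* b = GetOrCreateBase(bases, &file_b);
      assert(a1 == a2 && a1 != b);  // one base per dex file, reused.
      return 0;
    }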
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.cc b/compiler/optimizing/dex_cache_array_fixups_mips.cc
index 31fff26dd5..04a4294c48 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.cc
@@ -53,30 +53,16 @@ class DexCacheArrayFixupsVisitor : public HGraphVisitor {
}
private:
- void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
- // If this is a load with PC-relative access to the dex cache types array,
- // we need to add the dex cache arrays base as the special input.
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCachePcRelative) {
- // Initialize base for target dex file if needed.
- const DexFile& dex_file = load_class->GetDexFile();
- HMipsDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(dex_file);
- // Update the element offset in base.
- DexCacheArraysLayout layout(kMipsPointerSize, &dex_file);
- base->UpdateElementOffset(layout.TypeOffset(load_class->GetTypeIndex()));
- // Add the special argument base to the load.
- load_class->AddSpecialInput(base);
- }
- }
-
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
// If this is an invoke with PC-relative access to the dex cache methods array,
// we need to add the dex cache arrays base as the special input.
if (invoke->HasPcRelativeDexCache() &&
!IsCallFreeIntrinsic<IntrinsicLocationsBuilderMIPS>(invoke, codegen_)) {
// Initialize base for target method dex file if needed.
- HMipsDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(invoke->GetDexFile());
+ HMipsDexCacheArraysBase* base =
+ GetOrCreateDexCacheArrayBase(invoke->GetDexFileForPcRelativeDexCache());
// Update the element offset in base.
- DexCacheArraysLayout layout(kMipsPointerSize, &invoke->GetDexFile());
+ DexCacheArraysLayout layout(kMipsPointerSize, &invoke->GetDexFileForPcRelativeDexCache());
base->UpdateElementOffset(layout.MethodOffset(invoke->GetDexMethodIndex()));
// Add the special argument base to the method.
DCHECK(!invoke->HasCurrentMethodInput());
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 09dcefa02c..f6fba883bd 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -464,6 +464,11 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
}
+ void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+ VisitInvoke(invoke);
+ StartAttributeStream("invoke_type") << "InvokePolymorphic";
+ }
+
void VisitInstanceFieldGet(HInstanceFieldGet* iget) OVERRIDE {
StartAttributeStream("field_name") <<
iget->GetFieldInfo().GetDexFile().PrettyField(iget->GetFieldInfo().GetFieldIndex(),
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 437d35ccb7..f8d37bd714 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -28,7 +28,6 @@ class GVNTest : public CommonCompilerTest {};
TEST_F(GVNTest, LocalFieldElimination) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- ScopedNullHandle<mirror::DexCache> dex_cache;
HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -45,53 +44,53 @@ TEST_F(GVNTest, LocalFieldElimination) {
entry->AddSuccessor(block);
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* to_remove = block->GetLastInstruction();
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(43),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* different_offset = block->GetLastInstruction();
// Kill the value.
block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* use_after_kill = block->GetLastInstruction();
block->AddInstruction(new (&allocator) HExit());
@@ -113,7 +112,6 @@ TEST_F(GVNTest, LocalFieldElimination) {
TEST_F(GVNTest, GlobalFieldElimination) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- ScopedNullHandle<mirror::DexCache> dex_cache;
HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -129,13 +127,13 @@ TEST_F(GVNTest, GlobalFieldElimination) {
graph->AddBlock(block);
entry->AddSuccessor(block);
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
@@ -152,33 +150,33 @@ TEST_F(GVNTest, GlobalFieldElimination) {
else_->AddSuccessor(join);
then->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
then->AddInstruction(new (&allocator) HGoto());
else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
else_->AddInstruction(new (&allocator) HGoto());
join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
join->AddInstruction(new (&allocator) HExit());
@@ -196,7 +194,6 @@ TEST_F(GVNTest, GlobalFieldElimination) {
TEST_F(GVNTest, LoopFieldElimination) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- ScopedNullHandle<mirror::DexCache> dex_cache;
HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -213,13 +210,13 @@ TEST_F(GVNTest, LoopFieldElimination) {
graph->AddBlock(block);
entry->AddSuccessor(block);
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
block->AddInstruction(new (&allocator) HGoto());
@@ -236,13 +233,13 @@ TEST_F(GVNTest, LoopFieldElimination) {
loop_body->AddSuccessor(loop_header);
loop_header->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction();
loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
@@ -251,35 +248,35 @@ TEST_F(GVNTest, LoopFieldElimination) {
// and the body to be GVN'ed.
loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* field_set = loop_body->GetLastInstruction();
loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction();
loop_body->AddInstruction(new (&allocator) HGoto());
exit->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
HInstruction* field_get_in_exit = exit->GetLastInstruction();
exit->AddInstruction(new (&allocator) HExit());
@@ -319,7 +316,6 @@ TEST_F(GVNTest, LoopFieldElimination) {
TEST_F(GVNTest, LoopSideEffects) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- ScopedNullHandle<mirror::DexCache> dex_cache;
static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC();
@@ -376,13 +372,13 @@ TEST_F(GVNTest, LoopSideEffects) {
// Make one block with a side effect.
entry->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0));
SideEffectsAnalysis side_effects(graph);
@@ -401,13 +397,13 @@ TEST_F(GVNTest, LoopSideEffects) {
outer_loop_body->InsertInstructionBefore(
new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0),
outer_loop_body->GetLastInstruction());
@@ -427,13 +423,13 @@ TEST_F(GVNTest, LoopSideEffects) {
inner_loop_body->InsertInstructionBefore(
new (&allocator) HInstanceFieldSet(parameter,
parameter,
+ nullptr,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0),
inner_loop_body->GetLastInstruction());
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index d5c4c2fa69..3973985338 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -114,12 +114,7 @@ static bool IsMaxAtHint(
}
} else {
*suitable = instruction;
- while (instruction->IsArrayLength() ||
- instruction->IsNullCheck() ||
- instruction->IsNewArray()) {
- instruction = instruction->InputAt(0);
- }
- return instruction == hint;
+ return HuntForDeclaration(instruction) == hint;
}
return false;
}
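
The open-coded walk is replaced by the HuntForDeclaration helper introduced elsewhere in this change. A toy model of the walk, assuming the helper chases NewArray to its length input now that HNewArray takes the class as its first input (the single chased field below stands in for both InputAt(0) and GetLength()):

    #include <cassert>

    enum class Kind { kArrayLength, kNullCheck, kNewArray, kParameter };

    struct Instr {
      Kind kind;
      Instr* chased;  // array ref for ArrayLength/NullCheck, length for NewArray.
    };

    static Instr* HuntForDeclaration(Instr* i) {
      while (i->kind == Kind::kArrayLength || i->kind == Kind::kNullCheck ||
             i->kind == Kind::kNewArray) {
        i = i->chased;
      }
      return i;
    }

    int main() {
      Instr hint{Kind::kParameter, nullptr};
      Instr null_check{Kind::kNullCheck, &hint};
      Instr length{Kind::kArrayLength, &null_check};
      assert(HuntForDeclaration(&length) == &hint);  // matches the `== hint` test above.
      return 0;
    }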
@@ -368,10 +363,14 @@ void InductionVarRange::Replace(HInstruction* instruction,
}
}
-bool InductionVarRange::IsFinite(HLoopInformation* loop) const {
+bool InductionVarRange::IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const {
HInductionVarAnalysis::InductionInfo *trip =
induction_analysis_->LookupInfo(loop, GetLoopControl(loop));
- return trip != nullptr && !IsUnsafeTripCount(trip);
+ if (trip != nullptr && !IsUnsafeTripCount(trip)) {
+ IsConstant(trip->op_a, kExact, tc);
+ return true;
+ }
+ return false;
}
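
IsFinite keeps its old yes/no answer and additionally reports the trip count through the out-parameter when IsConstant succeeds, leaving `*tc` untouched otherwise. A minimal sketch of that contract, with TripInfo as a simplified stand-in for HInductionVarAnalysis::InductionInfo:

    #include <cassert>
    #include <cstdint>
    #include <optional>

    struct TripInfo { bool safe; std::optional<int64_t> constant_trip_count; };

    static bool IsFinite(const TripInfo* trip, int64_t* tc) {
      if (trip != nullptr && trip->safe) {
        if (trip->constant_trip_count) *tc = *trip->constant_trip_count;  // best effort.
        return true;
      }
      return false;
    }

    int main() {
      int64_t tc = 0;
      TripInfo known{true, 100};
      assert(IsFinite(&known, &tc) && tc == 100);   // finite with constant trip count.
      TripInfo unknown{true, std::nullopt};
      tc = -1;
      assert(IsFinite(&unknown, &tc) && tc == -1);  // finite, tc left as-is.
      assert(!IsFinite(nullptr, &tc));              // no induction info: not provably finite.
      return 0;
    }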
//
@@ -625,7 +624,7 @@ InductionVarRange::Value InductionVarRange::GetFetch(HInstruction* instruction,
if (chase_hint_ == nullptr) {
return is_min ? Value(0) : Value(std::numeric_limits<int32_t>::max());
} else if (instruction->InputAt(0)->IsNewArray()) {
- return GetFetch(instruction->InputAt(0)->InputAt(0), trip, in_body, is_min);
+ return GetFetch(instruction->InputAt(0)->AsNewArray()->GetLength(), trip, in_body, is_min);
}
} else if (instruction->IsTypeConversion()) {
// Since analysis is 32-bit (or narrower), chase beyond widening along the path.
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index ba14847d82..6c424b78b9 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -150,9 +150,9 @@ class InductionVarRange {
}
/**
- * Checks if header logic of a loop terminates.
+ * Checks if header logic of a loop terminates. Sets trip-count tc if known.
*/
- bool IsFinite(HLoopInformation* loop) const;
+ bool IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const;
private:
/*
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index aa3e1aab4f..d81817fb09 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -697,13 +697,8 @@ TEST_F(InductionVarRangeTest, MaxValue) {
}
TEST_F(InductionVarRangeTest, ArrayLengthAndHints) {
- HInstruction* new_array = new (&allocator_)
- HNewArray(x_,
- graph_->GetCurrentMethod(),
- 0,
- dex::TypeIndex(Primitive::kPrimInt),
- graph_->GetDexFile(),
- kQuickAllocArray);
+ // We pass a bogus constant for the class to avoid mocking one.
+ HInstruction* new_array = new (&allocator_) HNewArray(x_, x_, 0);
entry_block_->AddInstruction(new_array);
HInstruction* array_length = new (&allocator_) HArrayLength(new_array, 0);
entry_block_->AddInstruction(array_length);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3b83e95071..5d40f75618 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -308,8 +308,10 @@ ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
}
bool HInliner::TryInline(HInvoke* invoke_instruction) {
- if (invoke_instruction->IsInvokeUnresolved()) {
- return false; // Don't bother to move further if we know the method is unresolved.
+ if (invoke_instruction->IsInvokeUnresolved() ||
+ invoke_instruction->IsInvokePolymorphic()) {
+ return false; // Don't bother to move further if we know the method is unresolved or an
+ // invoke-polymorphic.
}
ScopedObjectAccess soa(Thread::Current());
@@ -429,13 +431,13 @@ HInstanceFieldGet* HInliner::BuildGetReceiverClass(ClassLinker* class_linker,
DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
HInstanceFieldGet* result = new (graph_->GetArena()) HInstanceFieldGet(
receiver,
+ field,
Primitive::kPrimNot,
field->GetOffset(),
field->IsVolatile(),
field->GetDexFieldIndex(),
field->GetDeclaringClass()->GetDexClassDefIndex(),
*field->GetDexFile(),
- handles_->NewHandle(field->GetDexCache()),
dex_pc);
// The class of a field is effectively final, and does not have any memory dependencies.
result->SetSideEffects(SideEffects::None());
@@ -472,10 +474,10 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
HInstruction* receiver = invoke_instruction->InputAt(0);
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- Handle<mirror::Class> handle = handles_->NewHandle(GetMonomorphicType(classes));
+ Handle<mirror::Class> monomorphic_type = handles_->NewHandle(GetMonomorphicType(classes));
if (!TryInlineAndReplace(invoke_instruction,
resolved_method,
- ReferenceTypeInfo::Create(handle, /* is_exact */ true),
+ ReferenceTypeInfo::Create(monomorphic_type, /* is_exact */ true),
/* do_rtp */ false,
/* cha_devirtualize */ false)) {
return false;
@@ -486,7 +488,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
cursor,
bb_cursor,
class_index,
- GetMonomorphicType(classes),
+ monomorphic_type,
invoke_instruction,
/* with_deoptimization */ true);
@@ -531,11 +533,9 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
HInstruction* cursor,
HBasicBlock* bb_cursor,
dex::TypeIndex class_index,
- mirror::Class* klass,
+ Handle<mirror::Class> klass,
HInstruction* invoke_instruction,
bool with_deoptimization) {
- ScopedAssertNoThreadSuspension sants("Adding compiler type guard");
-
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
HInstanceFieldGet* receiver_class = BuildGetReceiverClass(
class_linker, receiver, invoke_instruction->GetDexPc());
@@ -546,19 +546,20 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
}
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
- bool is_referrer = (klass == outermost_graph_->GetArtMethod()->GetDeclaringClass());
+ bool is_referrer = (klass.Get() == outermost_graph_->GetArtMethod()->GetDeclaringClass());
// Note that we will just compare the classes, so we don't need Java semantics access checks.
// Note that the type index and the dex file are relative to the method this type guard is
// inlined into.
HLoadClass* load_class = new (graph_->GetArena()) HLoadClass(graph_->GetCurrentMethod(),
class_index,
caller_dex_file,
+ klass,
is_referrer,
invoke_instruction->GetDexPc(),
/* needs_access_check */ false);
bb_cursor->InsertInstructionAfter(load_class, receiver_class);
// Sharpen after adding the instruction, as the sharpening may remove inputs.
- HSharpening::SharpenClass(load_class, klass, handles_, codegen_, compiler_driver_);
+ HSharpening::SharpenClass(load_class, codegen_, compiler_driver_);
// TODO: Extend reference type propagation to understand the guard.
HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
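
Taking the class as a Handle removes the need for the ScopedAssertNoThreadSuspension, but the guard emitted here is unchanged: load the receiver's class, compare it against the speculated class, deoptimize on mismatch. A toy model of that shape, with an exception standing in for HDeoptimize and a raw pointer for shadow$_klass_:

    #include <cassert>
    #include <stdexcept>

    struct Object { const void* klass; };  // stand-in for the shadow$_klass_ field.

    // Returns the receiver, "deoptimizing" (here: throwing) if its class is not
    // the one the inline cache speculated on.
    static Object* TypeGuard(Object* receiver, const void* expected_class) {
      if (receiver->klass != expected_class) {   // HNotEqual(load_class, receiver_class)
        throw std::runtime_error("deoptimize");  // HDeoptimize back to the interpreter.
      }
      return receiver;
    }

    int main() {
      int class_a, class_b;
      Object obj{&class_a};
      assert(TypeGuard(&obj, &class_a) == &obj);  // speculation holds: fall through.
      bool deopted = false;
      try { TypeGuard(&obj, &class_b); } catch (const std::runtime_error&) { deopted = true; }
      assert(deopted);                            // speculation fails: deoptimize.
      return 0;
    }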
@@ -618,6 +619,9 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
} else {
one_target_inlined = true;
+ VLOG(compiler) << "Polymorphic call to " << ArtMethod::PrettyMethod(resolved_method)
+ << " has inlined " << ArtMethod::PrettyMethod(method);
+
// If we have inlined all targets before, and this receiver is the last seen,
// we deoptimize instead of keeping the original invoke instruction.
bool deoptimize = all_targets_inlined &&
@@ -632,7 +636,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
cursor,
bb_cursor,
class_index,
- handle.Get(),
+ handle,
invoke_instruction,
deoptimize);
if (deoptimize) {
@@ -655,6 +659,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
<< " of its targets could be inlined";
return false;
}
+
MaybeRecordStat(kInlinedPolymorphicCall);
// Run type propagation to get the guards typed.
@@ -1161,13 +1166,13 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex
DCHECK(resolved_field != nullptr);
HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet(
obj,
+ resolved_field,
resolved_field->GetTypeAsPrimitiveType(),
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
resolved_field->GetDeclaringClass()->GetDexClassDefIndex(),
*dex_cache->GetDexFile(),
- dex_cache,
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
/* dex_pc */ 0);
@@ -1190,13 +1195,13 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(Handle<mirror::DexCache> dex
HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet(
obj,
value,
+ resolved_field,
resolved_field->GetTypeAsPrimitiveType(),
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
resolved_field->GetDeclaringClass()->GetDexClassDefIndex(),
*dex_cache->GetDexFile(),
- dex_cache,
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
/* dex_pc */ 0);
@@ -1424,24 +1429,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
return false;
}
- if (current->IsNewInstance() &&
- (current->AsNewInstance()->GetEntrypoint() == kQuickAllocObjectWithAccessCheck)) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it is using an entrypoint"
- << " with access checks";
- // Allocation entrypoint does not handle inlined frames.
- return false;
- }
-
- if (current->IsNewArray() &&
- (current->AsNewArray()->GetEntrypoint() == kQuickAllocArrayWithAccessCheck)) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it is using an entrypoint"
- << " with access checks";
- // Allocation entrypoint does not handle inlined frames.
- return false;
- }
-
if (current->IsUnresolvedStaticFieldGet() ||
current->IsUnresolvedInstanceFieldGet() ||
current->IsUnresolvedStaticFieldSet() ||
@@ -1542,8 +1529,6 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod*
}
}
- PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
-
// Iterate over the list of parameter types and test whether any of the
// actual inputs has a more specific reference type than the type declared in
// the signature.
@@ -1555,9 +1540,9 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod*
++param_idx, ++input_idx) {
HInstruction* input = invoke_instruction->InputAt(input_idx);
if (input->GetType() == Primitive::kPrimNot) {
- mirror::Class* param_cls = resolved_method->GetDexCacheResolvedType(
+ mirror::Class* param_cls = resolved_method->GetClassFromTypeIndex(
param_list->GetTypeItem(param_idx).type_idx_,
- pointer_size);
+ /* resolve */ false);
if (IsReferenceTypeRefinement(GetClassRTI(param_cls),
/* declared_can_be_null */ true,
input)) {
@@ -1579,6 +1564,13 @@ bool HInliner::ReturnTypeMoreSpecific(HInvoke* invoke_instruction,
/* declared_can_be_null */ true,
return_replacement)) {
return true;
+ } else if (return_replacement->IsInstanceFieldGet()) {
+ HInstanceFieldGet* field_get = return_replacement->AsInstanceFieldGet();
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (field_get->GetFieldInfo().GetField() ==
+ class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0)) {
+ return true;
+ }
}
} else if (return_replacement->IsInstanceOf()) {
// Inlining InstanceOf into an If may put a tighter bound on reference types.
@@ -1599,8 +1591,7 @@ void HInliner::FixUpReturnReferenceType(ArtMethod* resolved_method,
// TODO: we could be more precise by merging the phi inputs but that requires
// some functionality from the reference type propagation.
DCHECK(return_replacement->IsPhi());
- PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- mirror::Class* cls = resolved_method->GetReturnType(false /* resolve */, pointer_size);
+ mirror::Class* cls = resolved_method->GetReturnType(false /* resolve */);
return_replacement->SetReferenceTypeInfo(GetClassRTI(cls));
}
}
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 4c0b990f26..11aacab802 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -170,7 +170,7 @@ class HInliner : public HOptimization {
HInstruction* cursor,
HBasicBlock* bb_cursor,
dex::TypeIndex class_index,
- mirror::Class* klass,
+ Handle<mirror::Class> klass,
HInstruction* invoke_instruction,
bool with_deoptimization)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index af8e2c8a7c..ef8d74dce0 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1,3 +1,4 @@
+
/*
* Copyright (C) 2016 The Android Open Source Project
*
@@ -207,10 +208,8 @@ void HInstructionBuilder::InitializeInstruction(HInstruction* instruction) {
HEnvironment* environment = new (arena_) HEnvironment(
arena_,
current_locals_->size(),
- graph_->GetDexFile(),
- graph_->GetMethodIdx(),
+ graph_->GetArtMethod(),
instruction->GetDexPc(),
- graph_->GetInvokeType(),
instruction);
environment->CopyFrom(*current_locals_);
instruction->SetRawEnvironment(environment);
@@ -906,51 +905,69 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
false /* is_unresolved */);
}
+bool HInstructionBuilder::BuildInvokePolymorphic(const Instruction& instruction ATTRIBUTE_UNUSED,
+ uint32_t dex_pc,
+ uint32_t method_idx,
+ uint32_t proto_idx,
+ uint32_t number_of_vreg_arguments,
+ bool is_range,
+ uint32_t* args,
+ uint32_t register_index) {
+ const char* descriptor = dex_file_->GetShorty(proto_idx);
+ DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), number_of_vreg_arguments);
+ Primitive::Type return_type = Primitive::GetType(descriptor[0]);
+ size_t number_of_arguments = strlen(descriptor);
+ HInvoke* invoke = new (arena_) HInvokePolymorphic(arena_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx);
+ return HandleInvoke(invoke,
+ number_of_vreg_arguments,
+ args,
+ register_index,
+ is_range,
+ descriptor,
+ nullptr /* clinit_check */,
+ false /* is_unresolved */);
+}
+
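
The DCHECK above ties three counts together: `number_of_vreg_arguments` from the instruction, `NumArgRegisters` over the shorty (wide J/D arguments take two vregs) plus one vreg for the method-handle receiver, while `strlen(descriptor)` equals receiver-plus-arguments because the return-type character occupies the slot the receiver would. A stand-alone check — NumArgRegisters is re-implemented here for illustration, not taken from ART:

    #include <cassert>
    #include <cstring>

    // Counts dex vregs taken by the arguments of a shorty: the first char is
    // the return type, and 'J'/'D' arguments are wide (two vregs each).
    static size_t NumArgRegisters(const char* shorty) {
      size_t vregs = 0;
      for (const char* p = shorty + 1; *p != '\0'; ++p) {
        vregs += (*p == 'J' || *p == 'D') ? 2 : 1;
      }
      return vregs;
    }

    int main() {
      // A (Ljava/lang/Object;J)D signature has shorty "DLJ".
      const char* shorty = "DLJ";
      // number_of_vreg_arguments: 1 receiver vreg + 1 (reference) + 2 (long).
      assert(1 + NumArgRegisters(shorty) == 4);
      // number_of_arguments: receiver, reference, long == strlen(shorty).
      assert(std::strlen(shorty) == 3);
      return 0;
    }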
bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index)));
- const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
- bool finalizable;
- bool needs_access_check = NeedsAccessCheck(type_index, dex_cache, &finalizable);
-
- // Only the non-resolved entrypoint handles the finalizable class case. If we
- // need access checks, then we haven't resolved the method and the class may
- // again be finalizable.
- QuickEntrypointEnum entrypoint = (finalizable || needs_access_check)
- ? kQuickAllocObject
- : kQuickAllocObjectInitialized;
-
if (outer_dex_cache.Get() != dex_cache.Get()) {
// We currently do not support inlining allocations across dex files.
return false;
}
- HLoadClass* load_class = new (arena_) HLoadClass(
- graph_->GetCurrentMethod(),
- type_index,
- outer_dex_file,
- IsOutermostCompilingClass(type_index),
- dex_pc,
- needs_access_check);
+ HLoadClass* load_class = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
- AppendInstruction(load_class);
HInstruction* cls = load_class;
- if (!IsInitialized(resolved_class)) {
+ Handle<mirror::Class> klass = load_class->GetClass();
+
+ if (!IsInitialized(klass)) {
cls = new (arena_) HClinitCheck(load_class, dex_pc);
AppendInstruction(cls);
}
+ // Only the access check entrypoint handles the finalizable class case. If we
+ // need access checks, then we haven't resolved the method and the class may
+ // again be finalizable.
+ QuickEntrypointEnum entrypoint = kQuickAllocObjectInitialized;
+ if (load_class->NeedsAccessCheck() || klass->IsFinalizable() || !klass->IsInstantiable()) {
+ entrypoint = kQuickAllocObjectWithChecks;
+ }
+
+ // Consider classes we haven't resolved as potentially finalizable.
+ bool finalizable = (klass.Get() == nullptr) || klass->IsFinalizable();
+
AppendInstruction(new (arena_) HNewInstance(
cls,
- graph_->GetCurrentMethod(),
dex_pc,
type_index,
*dex_compilation_unit_->GetDexFile(),
- needs_access_check,
finalizable,
entrypoint));
return true;
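
Entrypoint selection collapses into one rule: anything that needs a runtime test — access check, finalizable class, non-instantiable or unresolved class — goes through kQuickAllocObjectWithChecks; everything else allocates directly. A hedged distillation of that branch (the helper is invented; class initialization is handled separately by the HClinitCheck inserted above):

    #include <cassert>

    enum class Entrypoint { kQuickAllocObjectInitialized, kQuickAllocObjectWithChecks };

    static Entrypoint SelectAllocEntrypoint(bool needs_access_check,
                                            bool finalizable,
                                            bool instantiable) {
      if (needs_access_check || finalizable || !instantiable) {
        return Entrypoint::kQuickAllocObjectWithChecks;  // one slow path for all checks.
      }
      return Entrypoint::kQuickAllocObjectInitialized;
    }

    int main() {
      assert(SelectAllocEntrypoint(false, false, true) == Entrypoint::kQuickAllocObjectInitialized);
      assert(SelectAllocEntrypoint(true, false, true) == Entrypoint::kQuickAllocObjectWithChecks);
      assert(SelectAllocEntrypoint(false, true, true) == Entrypoint::kQuickAllocObjectWithChecks);
      assert(SelectAllocEntrypoint(false, false, false) == Entrypoint::kQuickAllocObjectWithChecks);
      return 0;
    }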
@@ -991,7 +1008,6 @@ HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
ArtMethod* resolved_method,
uint32_t method_idx,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
- const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Thread* self = Thread::Current();
StackHandleScope<2> hs(self);
Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
@@ -1019,15 +1035,9 @@ HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
} else if (storage_index.IsValid()) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
- HLoadClass* load_class = new (arena_) HLoadClass(
- graph_->GetCurrentMethod(),
- storage_index,
- outer_dex_file,
- is_outer_class,
- dex_pc,
- /*needs_access_check*/ false);
- AppendInstruction(load_class);
- clinit_check = new (arena_) HClinitCheck(load_class, dex_pc);
+ HLoadClass* cls = BuildLoadClass(
+ storage_index, dex_pc, /* check_access */ false, /* outer */ true);
+ clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
AppendInstruction(clinit_check);
}
return clinit_check;
@@ -1235,13 +1245,13 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
field_set = new (arena_) HInstanceFieldSet(object,
value,
+ resolved_field,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
class_def_index,
*dex_file_,
- dex_compilation_unit_->GetDexCache(),
dex_pc);
}
AppendInstruction(field_set);
@@ -1256,13 +1266,13 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
} else {
uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
field_get = new (arena_) HInstanceFieldGet(object,
+ resolved_field,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
class_def_index,
*dex_file_,
- dex_compilation_unit_->GetDexCache(),
dex_pc);
}
AppendInstruction(field_get);
@@ -1311,9 +1321,9 @@ bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) c
}
void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
- uint32_t dex_pc,
- bool is_put,
- Primitive::Type field_type) {
+ uint32_t dex_pc,
+ bool is_put,
+ Primitive::Type field_type) {
uint32_t source_or_dest_reg = instruction.VRegA_21c();
uint16_t field_index = instruction.VRegB_21c();
@@ -1349,7 +1359,6 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
- const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
@@ -1377,16 +1386,10 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
}
- HLoadClass* constant = new (arena_) HLoadClass(graph_->GetCurrentMethod(),
- storage_index,
- outer_dex_file,
- is_outer_class,
- dex_pc,
- /*needs_access_check*/ false);
- AppendInstruction(constant);
+ HLoadClass* constant = BuildLoadClass(
+ storage_index, dex_pc, /* check_access */ false, /* outer */ true);
HInstruction* cls = constant;
-
Handle<mirror::Class> klass(hs.NewHandle(resolved_field->GetDeclaringClass()));
if (!IsInitialized(klass)) {
cls = new (arena_) HClinitCheck(constant, dex_pc);
@@ -1400,23 +1403,23 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
DCHECK_EQ(HPhi::ToPhiType(value->GetType()), HPhi::ToPhiType(field_type));
AppendInstruction(new (arena_) HStaticFieldSet(cls,
value,
+ resolved_field,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
class_def_index,
*dex_file_,
- dex_cache_,
dex_pc));
} else {
AppendInstruction(new (arena_) HStaticFieldGet(cls,
+ resolved_field,
field_type,
resolved_field->GetOffset(),
resolved_field->IsVolatile(),
field_index,
class_def_index,
*dex_file_,
- dex_cache_,
dex_pc));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
@@ -1495,16 +1498,8 @@ void HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
uint32_t* args,
uint32_t register_index) {
HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments, dex_pc);
- bool finalizable;
- QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index, &finalizable)
- ? kQuickAllocArrayWithAccessCheck
- : kQuickAllocArray;
- HInstruction* object = new (arena_) HNewArray(length,
- graph_->GetCurrentMethod(),
- dex_pc,
- type_index,
- *dex_compilation_unit_->GetDexFile(),
- entrypoint);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ HInstruction* object = new (arena_) HNewArray(cls, length, dex_pc);
AppendInstruction(object);
const char* descriptor = dex_file_->StringByTypeIdx(type_index);
@@ -1633,33 +1628,57 @@ static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
}
}
-void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
- uint8_t destination,
- uint8_t reference,
- dex::TypeIndex type_index,
- uint32_t dex_pc) {
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
+ uint32_t dex_pc,
+ bool check_access,
+ bool outer) {
ScopedObjectAccess soa(Thread::Current());
+ const DexCompilationUnit* compilation_unit =
+ outer ? outer_compilation_unit_ : dex_compilation_unit_;
+ const DexFile& dex_file = *compilation_unit->GetDexFile();
StackHandleScope<1> hs(soa.Self());
- const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index)));
-
- bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(),
- dex_cache,
- type_index);
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::Class> klass = handles_->NewHandle(compiler_driver_->ResolveClass(
+ soa, compilation_unit->GetDexCache(), class_loader, type_index, compilation_unit));
+
+ bool is_accessible = false;
+ if (!check_access) {
+ is_accessible = true;
+ } else if (klass.Get() != nullptr) {
+ if (klass->IsPublic()) {
+ is_accessible = true;
+ } else {
+ mirror::Class* compiling_class = GetCompilingClass();
+ if (compiling_class != nullptr && compiling_class->CanAccess(klass.Get())) {
+ is_accessible = true;
+ }
+ }
+ }
- HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
- HLoadClass* cls = new (arena_) HLoadClass(
+ HLoadClass* load_class = new (arena_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
dex_file,
- IsOutermostCompilingClass(type_index),
+ klass,
+ klass.Get() != nullptr && (klass.Get() == GetOutermostCompilingClass()),
dex_pc,
- !can_access);
- AppendInstruction(cls);
+ !is_accessible);
- TypeCheckKind check_kind = ComputeTypeCheckKind(resolved_class);
+ AppendInstruction(load_class);
+ return load_class;
+}
+
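
BuildLoadClass folds the old CanAccessTypeWithoutChecks call into a direct test: skipped checks always pass, unresolved classes keep the runtime check, and resolved classes pass when public or accessible from the compiling class. A toy version of that decision — the "package" rule below is an invented stand-in for mirror::Class::CanAccess:

    #include <cassert>

    struct Class { bool is_public; const void* package; };

    static bool IsAccessible(bool check_access, const Class* klass, const Class* compiling) {
      if (!check_access) return true;      // caller vouches (e.g. clinit-check loads).
      if (klass == nullptr) return false;  // unresolved: keep needs_access_check set.
      if (klass->is_public) return true;
      return compiling != nullptr && compiling->package == klass->package;  // ~CanAccess().
    }

    int main() {
      int pkg;
      Class compiling{false, &pkg};
      Class public_cls{true, nullptr};
      Class same_pkg{false, &pkg};
      assert(IsAccessible(/*check_access*/ false, nullptr, nullptr));  // check skipped.
      assert(!IsAccessible(true, nullptr, &compiling));                // unresolved.
      assert(IsAccessible(true, &public_cls, &compiling));             // public.
      assert(IsAccessible(true, &same_pkg, &compiling));               // same "package".
      return 0;
    }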
+void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
+ uint8_t destination,
+ uint8_t reference,
+ dex::TypeIndex type_index,
+ uint32_t dex_pc) {
+ HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+
+ ScopedObjectAccess soa(Thread::Current());
+ TypeCheckKind check_kind = ComputeTypeCheckKind(cls->GetClass());
if (instruction.Opcode() == Instruction::INSTANCE_OF) {
AppendInstruction(new (arena_) HInstanceOf(object, cls, check_kind, dex_pc));
UpdateLocal(destination, current_block_->GetLastInstruction());
@@ -1916,6 +1935,37 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
break;
}
+ case Instruction::INVOKE_POLYMORPHIC: {
+ uint16_t method_idx = instruction.VRegB_45cc();
+ uint16_t proto_idx = instruction.VRegH_45cc();
+ uint32_t number_of_vreg_arguments = instruction.VRegA_45cc();
+ uint32_t args[5];
+ instruction.GetVarArgs(args);
+ return BuildInvokePolymorphic(instruction,
+ dex_pc,
+ method_idx,
+ proto_idx,
+ number_of_vreg_arguments,
+ false,
+ args,
+ -1);
+ }
+
+ case Instruction::INVOKE_POLYMORPHIC_RANGE: {
+ uint16_t method_idx = instruction.VRegB_4rcc();
+ uint16_t proto_idx = instruction.VRegH_4rcc();
+ uint32_t number_of_vreg_arguments = instruction.VRegA_4rcc();
+ uint32_t register_index = instruction.VRegC_4rcc();
+ return BuildInvokePolymorphic(instruction,
+ dex_pc,
+ method_idx,
+ proto_idx,
+ number_of_vreg_arguments,
+ true,
+ nullptr,
+ register_index);
+ }
+
case Instruction::NEG_INT: {
Unop_12x<HNeg>(instruction, Primitive::kPrimInt, dex_pc);
break;
@@ -2449,16 +2499,8 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::NEW_ARRAY: {
dex::TypeIndex type_index(instruction.VRegC_22c());
HInstruction* length = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimInt);
- bool finalizable;
- QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index, &finalizable)
- ? kQuickAllocArrayWithAccessCheck
- : kQuickAllocArray;
- AppendInstruction(new (arena_) HNewArray(length,
- graph_->GetCurrentMethod(),
- dex_pc,
- type_index,
- *dex_compilation_unit_->GetDexFile(),
- entrypoint));
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+ AppendInstruction(new (arena_) HNewArray(cls, length, dex_pc));
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
break;
}
@@ -2632,21 +2674,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::CONST_CLASS: {
dex::TypeIndex type_index(instruction.VRegB_21c());
- // `CanAccessTypeWithoutChecks` will tell whether the method being
- // built is trying to access its own class, so that the generated
- // code can optimize for this case. However, the optimization does not
- // work for inlining, so we use `IsOutermostCompilingClass` instead.
- ScopedObjectAccess soa(Thread::Current());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(), dex_cache, type_index);
- AppendInstruction(new (arena_) HLoadClass(
- graph_->GetCurrentMethod(),
- type_index,
- *dex_file_,
- IsOutermostCompilingClass(type_index),
- dex_pc,
- !can_access));
+ BuildLoadClass(type_index, dex_pc, /* check_access */ true);
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index f29e522040..5efe95094c 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -46,9 +46,11 @@ class HInstructionBuilder : public ValueObject {
CompilerDriver* driver,
const uint8_t* interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
- Handle<mirror::DexCache> dex_cache)
+ Handle<mirror::DexCache> dex_cache,
+ VariableSizedHandleScope* handles)
: arena_(graph->GetArena()),
graph_(graph),
+ handles_(handles),
dex_file_(dex_file),
code_item_(code_item),
return_type_(return_type),
@@ -175,6 +177,17 @@ class HInstructionBuilder : public ValueObject {
uint32_t* args,
uint32_t register_index);
+ // Builds an invocation node for invoke-polymorphic and returns whether the
+ // instruction is supported.
+ bool BuildInvokePolymorphic(const Instruction& instruction,
+ uint32_t dex_pc,
+ uint32_t method_idx,
+ uint32_t proto_idx,
+ uint32_t number_of_vreg_arguments,
+ bool is_range,
+ uint32_t* args,
+ uint32_t register_index);
+
// Builds a new array node and the instructions that fill it.
void BuildFilledNewArray(uint32_t dex_pc,
dex::TypeIndex type_index,
@@ -212,6 +225,14 @@ class HInstructionBuilder : public ValueObject {
// Builds an instruction sequence for a switch statement.
void BuildSwitch(const Instruction& instruction, uint32_t dex_pc);
+ // Builds a `HLoadClass` loading the given `type_index`. If `outer` is true,
+ // this method will use the outer class's dex file to lookup the type at
+ // this method will use the outer class's dex file to look up the type at
+ // `type_index`.
+ HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
+ uint32_t dex_pc,
+ bool check_access,
+ bool outer = false);
+
// Returns the outer-most compiling method's class.
mirror::Class* GetOutermostCompilingClass() const;
@@ -271,6 +292,7 @@ class HInstructionBuilder : public ValueObject {
ArenaAllocator* const arena_;
HGraph* const graph_;
+ VariableSizedHandleScope* handles_;
// The dex file where the method being compiled is, and the bytecode data.
const DexFile* const dex_file_;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index dbafb0b16e..35f59cb4a4 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -777,7 +777,7 @@ void InstructionSimplifierVisitor::VisitArrayLength(HArrayLength* instruction) {
// If the array is a NewArray with constant size, replace the array length
// with the constant instruction. This helps the bounds check elimination phase.
if (input->IsNewArray()) {
- input = input->InputAt(0);
+ input = input->AsNewArray()->GetLength();
if (input->IsIntConstant()) {
instruction->ReplaceWith(input);
}
@@ -1118,7 +1118,66 @@ void InstructionSimplifierVisitor::VisitAboveOrEqual(HAboveOrEqual* condition) {
VisitCondition(condition);
}
+// Recognize the following pattern:
+// obj.getClass() ==/!= Foo.class
+// And replace it with a constant value if the type of `obj` is statically known.
+static bool RecognizeAndSimplifyClassCheck(HCondition* condition) {
+ HInstruction* input_one = condition->InputAt(0);
+ HInstruction* input_two = condition->InputAt(1);
+ HLoadClass* load_class = input_one->IsLoadClass()
+ ? input_one->AsLoadClass()
+ : input_two->AsLoadClass();
+ if (load_class == nullptr) {
+ return false;
+ }
+
+ ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ if (!class_rti.IsValid()) {
+ // Unresolved class.
+ return false;
+ }
+
+ HInstanceFieldGet* field_get = (load_class == input_one)
+ ? input_two->AsInstanceFieldGet()
+ : input_one->AsInstanceFieldGet();
+ if (field_get == nullptr) {
+ return false;
+ }
+
+ HInstruction* receiver = field_get->InputAt(0);
+ ReferenceTypeInfo receiver_type = receiver->GetReferenceTypeInfo();
+ if (!receiver_type.IsExact()) {
+ return false;
+ }
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0);
+ DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
+ if (field_get->GetFieldInfo().GetField() != field) {
+ return false;
+ }
+
+ // We can replace the compare.
+ int value = 0;
+ if (receiver_type.IsEqual(class_rti)) {
+ value = condition->IsEqual() ? 1 : 0;
+ } else {
+ value = condition->IsNotEqual() ? 1 : 0;
+ }
+ condition->ReplaceWith(condition->GetBlock()->GetGraph()->GetIntConstant(value));
+ return true;
+ }
+}
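
The fold is only legal when the receiver's type is exact and the field really is java.lang.Object's shadow$_klass_; then both sides of the compare are compile-time constants. A stand-alone model of the truth table the function implements, with strings standing in for class references:

    #include <cassert>
    #include <string>

    // is_exact mirrors ReferenceTypeInfo::IsExact(); klass stands in for the RTI class.
    struct TypeInfo { std::string klass; bool is_exact; };

    // Returns 1/0 when the compare folds, -1 when it must be kept.
    static int FoldClassCheck(const TypeInfo& receiver, const std::string& loaded, bool is_equal) {
      if (!receiver.is_exact) return -1;  // runtime class not pinned down: keep compare.
      bool same = (receiver.klass == loaded);
      return (same == is_equal) ? 1 : 0;
    }

    int main() {
      assert(FoldClassCheck({"java.lang.String", true}, "java.lang.String", true) == 1);
      assert(FoldClassCheck({"java.lang.String", true}, "java.lang.Integer", true) == 0);
      assert(FoldClassCheck({"java.lang.String", true}, "java.lang.Integer", false) == 1);
      assert(FoldClassCheck({"java.lang.Object", false}, "java.lang.Object", true) == -1);
      return 0;
    }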
+
void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) {
+ if (condition->IsEqual() || condition->IsNotEqual()) {
+ if (RecognizeAndSimplifyClassCheck(condition)) {
+ return;
+ }
+ }
+
// Reverse condition if left is constant. Our code generators prefer constant
// on the right hand side.
if (condition->GetLeft()->IsConstant() && !condition->GetRight()->IsConstant()) {
@@ -1715,7 +1774,7 @@ static bool IsArrayLengthOf(HInstruction* potential_length, HInstruction* potent
}
if (potential_array->IsNewArray()) {
- return potential_array->InputAt(0) == potential_length;
+ return potential_array->AsNewArray()->GetLength() == potential_length;
}
return false;
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index fc6ff7b197..17d683f357 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -145,7 +145,7 @@ void IntrinsicsRecognizer::Run() {
if (!CheckInvokeType(intrinsic, invoke)) {
LOG(WARNING) << "Found an intrinsic with unexpected invoke type: "
<< intrinsic << " for "
- << invoke->GetDexFile().PrettyMethod(invoke->GetDexMethodIndex())
+ << art_method->PrettyMethod()
<< invoke->DebugName();
} else {
invoke->SetIntrinsic(intrinsic,
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index e9c6615870..f1ae549928 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -744,14 +744,55 @@ void IntrinsicCodeGeneratorMIPS::VisitLongBitCount(HInvoke* invoke) {
GenBitCount(invoke->GetLocations(), Primitive::kPrimLong, IsR6(), GetAssembler());
}
-static void MathAbsFP(LocationSummary* locations, bool is64bit, MipsAssembler* assembler) {
+static void MathAbsFP(LocationSummary* locations,
+ bool is64bit,
+ bool isR2OrNewer,
+ bool isR6,
+ MipsAssembler* assembler) {
FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
FRegister out = locations->Out().AsFpuRegister<FRegister>();
- if (is64bit) {
- __ AbsD(out, in);
+ // Note, as a "quality of implementation", rather than pure "spec compliance", we require that
+ // Math.abs() clears the sign bit (but changes nothing else) for all numbers, including NaN
+ // (signaling NaN may become quiet though).
+ //
+ // The ABS.fmt instructions (abs.s and abs.d) do exactly that when NAN2008=1 (R6). For this case,
+ // both regular floating point numbers and NAN values are treated alike, only the sign bit is
+ // affected by this instruction.
+ // But when NAN2008=0 (R2 and before), the ABS.fmt instructions can't be used. For this case, any
+ // NaN operand signals invalid operation. This means that other bits (not just the sign bit)
+ // might be changed when doing abs(NaN). Because of that, we clear the sign bit in a different way.
+ if (isR6) {
+ if (is64bit) {
+ __ AbsD(out, in);
+ } else {
+ __ AbsS(out, in);
+ }
} else {
- __ AbsS(out, in);
+ if (is64bit) {
+ if (in != out) {
+ __ MovD(out, in);
+ }
+ __ MoveFromFpuHigh(TMP, in);
+ // ins instruction is not available for R1.
+ if (isR2OrNewer) {
+ __ Ins(TMP, ZERO, 31, 1);
+ } else {
+ __ Sll(TMP, TMP, 1);
+ __ Srl(TMP, TMP, 1);
+ }
+ __ MoveToFpuHigh(TMP, out);
+ } else {
+ __ Mfc1(TMP, in);
+ // ins instruction is not available for R1.
+ if (isR2OrNewer) {
+ __ Ins(TMP, ZERO, 31, 1);
+ } else {
+ __ Sll(TMP, TMP, 1);
+ __ Srl(TMP, TMP, 1);
+ }
+ __ Mtc1(TMP, out);
+ }
}
}
@@ -761,7 +802,7 @@ void IntrinsicLocationsBuilderMIPS::VisitMathAbsDouble(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsDouble(HInvoke* invoke) {
- MathAbsFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MathAbsFP(invoke->GetLocations(), /* is64bit */ true, IsR2OrNewer(), IsR6(), GetAssembler());
}
// float java.lang.Math.abs(float)
@@ -770,7 +811,7 @@ void IntrinsicLocationsBuilderMIPS::VisitMathAbsFloat(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsFloat(HInvoke* invoke) {
- MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MathAbsFP(invoke->GetLocations(), /* is64bit */ false, IsR2OrNewer(), IsR6(), GetAssembler());
}
static void GenAbsInteger(LocationSummary* locations, bool is64bit, MipsAssembler* assembler) {
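
The pre-R6 path above is a bit-manipulation abs: the value is routed through an integer register and bit 31 of the (high) word is cleared, flipping no other bits even for NaN inputs. Ins(TMP, ZERO, 31, 1) does that masking in one instruction on R2 and newer; on R1 the Sll/Srl pair shifts the sign bit out and back in as zero. A portable restatement of the double case (a sketch, not ART code):

    #include <cstdint>
    #include <cstring>

    // Clears only the sign bit, matching the contract described above:
    // abs(NaN) keeps the NaN payload and merely drops the sign.
    double BitwiseAbs(double in) {
      uint64_t bits;
      std::memcpy(&bits, &in, sizeof bits);  // safe type punning
      bits &= ~(UINT64_C(1) << 63);          // zero the sign bit, nothing else
      double out;
      std::memcpy(&out, &bits, sizeof out);
      return out;
    }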
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 8c34dc6a86..5bcfa4c98b 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -111,20 +111,19 @@ TEST_F(LICMTest, FieldHoisting) {
BuildLoop();
// Populate the loop with instructions: set/get field with different types.
- ScopedNullHandle<mirror::DexCache> dex_cache;
HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
+ nullptr,
Primitive::kPrimLong,
MemberOffset(10),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph_->GetDexFile(),
- dex_cache,
0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
- parameter_, int_constant_, Primitive::kPrimInt, MemberOffset(20),
- false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), dex_cache, 0);
+ parameter_, int_constant_, nullptr, Primitive::kPrimInt, MemberOffset(20),
+ false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -140,24 +139,24 @@ TEST_F(LICMTest, NoFieldHoisting) {
// Populate the loop with instructions: set/get field with same types.
ScopedNullHandle<mirror::DexCache> dex_cache;
HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
+ nullptr,
Primitive::kPrimLong,
MemberOffset(10),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph_->GetDexFile(),
- dex_cache,
0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_,
get_field,
+ nullptr,
Primitive::kPrimLong,
MemberOffset(10),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph_->GetDexFile(),
- dex_cache,
0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 2856c3ea11..2d3c00fb97 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -943,6 +943,10 @@ class LSEVisitor : public HGraphVisitor {
HandleInvoke(invoke);
}
+ void VisitInvokePolymorphic(HInvokePolymorphic* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
HandleInvoke(clinit);
}
@@ -975,7 +979,7 @@ class LSEVisitor : public HGraphVisitor {
}
if (ref_info->IsSingletonAndRemovable() &&
!new_instance->IsFinalizable() &&
- !new_instance->NeedsAccessCheck()) {
+ !new_instance->NeedsChecks()) {
singleton_new_instances_.push_back(new_instance);
}
ArenaVector<HInstruction*>& heap_values =
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 9d73e29602..95838380cc 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -161,26 +161,27 @@ void HLoopOptimization::RemoveLoop(LoopNode* node) {
void HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
for ( ; node != nullptr; node = node->next) {
+ // Visit inner loops first.
int current_induction_simplification_count = induction_simplication_count_;
if (node->inner != nullptr) {
TraverseLoopsInnerToOuter(node->inner);
}
- // Visit loop after its inner loops have been visited. If the induction of any inner
- // loop has been simplified, recompute the induction information of this loop first.
+ // Recompute induction information of this loop if the induction
+ // of any inner loop has been simplified.
if (current_induction_simplification_count != induction_simplication_count_) {
induction_range_.ReVisit(node->loop_info);
}
- // Repeat simplifications until no more changes occur. Note that since
- // each simplification consists of eliminating code (without introducing
- // new code), this process is always finite.
+ // Repeat simplifications in the body of this loop until no more changes occur.
+ // Note that since each simplification consists of eliminating code (without
+ // introducing new code), this process is always finite.
do {
simplified_ = false;
- SimplifyBlocks(node);
SimplifyInduction(node);
+ SimplifyBlocks(node);
} while (simplified_);
- // Remove inner loops when empty.
+ // Simplify inner loop.
if (node->inner == nullptr) {
- RemoveIfEmptyInnerLoop(node);
+ SimplifyInnerLoop(node);
}
}
}
@@ -198,7 +199,7 @@ void HLoopOptimization::SimplifyInduction(LoopNode* node) {
iset_->clear();
int32_t use_count = 0;
if (IsPhiInduction(phi) &&
- IsOnlyUsedAfterLoop(node->loop_info, phi, &use_count) &&
+ IsOnlyUsedAfterLoop(node->loop_info, phi, /*collect_loop_uses*/ false, &use_count) &&
// No uses, or no early-exit with proper replacement.
(use_count == 0 ||
(!IsEarlyExit(node->loop_info) && TryReplaceWithLastValue(phi, preheader)))) {
@@ -206,7 +207,6 @@ void HLoopOptimization::SimplifyInduction(LoopNode* node) {
RemoveFromCycle(i);
}
simplified_ = true;
- induction_simplication_count_++;
}
}
}
@@ -216,24 +216,14 @@ void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
// Remove dead instructions from the loop-body.
- for (HBackwardInstructionIterator i(block->GetInstructions()); !i.Done(); i.Advance()) {
- HInstruction* instruction = i.Current();
- if (instruction->IsDeadAndRemovable()) {
- simplified_ = true;
- block->RemoveInstruction(instruction);
- }
- }
+ RemoveDeadInstructions(block->GetPhis());
+ RemoveDeadInstructions(block->GetInstructions());
// Remove trivial control flow blocks from the loop-body.
- HBasicBlock* succ = nullptr;
- if (IsGotoBlock(block, &succ) && succ->GetPredecessors().size() == 1) {
- // Trivial goto block can be removed.
- HBasicBlock* pred = block->GetSinglePredecessor();
+ if (block->GetPredecessors().size() == 1 &&
+ block->GetSuccessors().size() == 1 &&
+ block->GetSingleSuccessor()->GetPredecessors().size() == 1) {
simplified_ = true;
- pred->ReplaceSuccessor(block, succ);
- block->RemoveDominatedBlock(succ);
- block->DisconnectAndDelete();
- pred->AddDominatedBlock(succ);
- succ->SetDominator(pred);
+ block->MergeWith(block->GetSingleSuccessor());
} else if (block->GetSuccessors().size() == 2) {
// Trivial if block can be bypassed to either branch.
HBasicBlock* succ0 = block->GetSuccessors()[0];
@@ -258,55 +248,66 @@ void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
}
}
-void HLoopOptimization::RemoveIfEmptyInnerLoop(LoopNode* node) {
+bool HLoopOptimization::SimplifyInnerLoop(LoopNode* node) {
HBasicBlock* header = node->loop_info->GetHeader();
HBasicBlock* preheader = node->loop_info->GetPreHeader();
// Ensure loop header logic is finite.
- if (!induction_range_.IsFinite(node->loop_info)) {
- return;
+ int64_t tc = 0;
+ if (!induction_range_.IsFinite(node->loop_info, &tc)) {
+ return false;
}
// Ensure there is only a single loop-body (besides the header).
HBasicBlock* body = nullptr;
for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
if (it.Current() != header) {
if (body != nullptr) {
- return;
+ return false;
}
body = it.Current();
}
}
// Ensure there is only a single exit point.
if (header->GetSuccessors().size() != 2) {
- return;
+ return false;
}
HBasicBlock* exit = (header->GetSuccessors()[0] == body)
? header->GetSuccessors()[1]
: header->GetSuccessors()[0];
// Ensure exit can only be reached by exiting loop.
if (exit->GetPredecessors().size() != 1) {
- return;
+ return false;
}
- // Detect an empty loop: no side effects other than plain iteration. Replace
- // subsequent index uses, if any, with the last value and remove the loop.
+ // Detect either an empty loop (no side effects other than plain iteration) or
+ // a trivial loop (just iterating once). Replace subsequent index uses, if any,
+ // with the last value and remove the loop, possibly after unrolling its body.
+ HInstruction* phi = header->GetFirstPhi();
iset_->clear();
int32_t use_count = 0;
- if (IsEmptyHeader(header) &&
- IsEmptyBody(body) &&
- IsOnlyUsedAfterLoop(node->loop_info, header->GetFirstPhi(), &use_count) &&
- // No uses, or proper replacement.
- (use_count == 0 || TryReplaceWithLastValue(header->GetFirstPhi(), preheader))) {
- body->DisconnectAndDelete();
- exit->RemovePredecessor(header);
- header->RemoveSuccessor(exit);
- header->RemoveDominatedBlock(exit);
- header->DisconnectAndDelete();
- preheader->AddSuccessor(exit);
- preheader->AddInstruction(new (graph_->GetArena()) HGoto()); // global allocator
- preheader->AddDominatedBlock(exit);
- exit->SetDominator(preheader);
- // Update hierarchy.
- RemoveLoop(node);
+ if (IsEmptyHeader(header)) {
+ bool is_empty = IsEmptyBody(body);
+ if ((is_empty || tc == 1) &&
+ IsOnlyUsedAfterLoop(node->loop_info, phi, /*collect_loop_uses*/ true, &use_count) &&
+ // No uses, or proper replacement.
+ (use_count == 0 || TryReplaceWithLastValue(phi, preheader))) {
+ if (!is_empty) {
+ // Unroll the loop body, which sees the initial value of the index.
+ phi->ReplaceWith(phi->InputAt(0));
+ preheader->MergeInstructionsWith(body);
+ }
+ body->DisconnectAndDelete();
+ exit->RemovePredecessor(header);
+ header->RemoveSuccessor(exit);
+ header->RemoveDominatedBlock(exit);
+ header->DisconnectAndDelete();
+ preheader->AddSuccessor(exit);
+ preheader->AddInstruction(new (graph_->GetArena()) HGoto()); // global allocator
+ preheader->AddDominatedBlock(exit);
+ exit->SetDominator(preheader);
+ RemoveLoop(node); // update hierarchy
+ return true;
+ }
}
+ return false;
}
bool HLoopOptimization::IsPhiInduction(HPhi* phi) {
@@ -374,12 +375,19 @@ bool HLoopOptimization::IsEmptyBody(HBasicBlock* block) {
bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
+ bool collect_loop_uses,
/*out*/ int32_t* use_count) {
for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
HInstruction* user = use.GetUser();
if (iset_->find(user) == iset_->end()) { // not excluded?
HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
if (other_loop_info != nullptr && other_loop_info->IsIn(*loop_info)) {
+ // If collect_loop_uses is set, simply keep adding those uses to the set.
+ // Otherwise, reject uses inside the loop that were not already in the set.
+ if (collect_loop_uses) {
+ iset_->insert(user);
+ continue;
+ }
return false;
}
++*use_count;
@@ -388,40 +396,48 @@ bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
return true;
}
-void HLoopOptimization::ReplaceAllUses(HInstruction* instruction, HInstruction* replacement) {
- const HUseList<HInstruction*>& uses = instruction->GetUses();
- for (auto it = uses.begin(), end = uses.end(); it != end;) {
- HInstruction* user = it->GetUser();
- size_t index = it->GetIndex();
- ++it; // increment before replacing
- if (iset_->find(user) == iset_->end()) { // not excluded?
- user->ReplaceInput(replacement, index);
- induction_range_.Replace(user, instruction, replacement); // update induction
- }
- }
- const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
- for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
- HEnvironment* user = it->GetUser();
- size_t index = it->GetIndex();
- ++it; // increment before replacing
- if (iset_->find(user->GetHolder()) == iset_->end()) { // not excluded?
- user->RemoveAsUserOfInput(index);
- user->SetRawEnvAt(index, replacement);
- replacement->AddEnvUseAt(user, index);
- }
- }
-}
-
bool HLoopOptimization::TryReplaceWithLastValue(HInstruction* instruction, HBasicBlock* block) {
// Try to replace outside uses with the last value. Environment uses can consume this
// value too, since any first true use is outside the loop (although this may imply
// that de-opting may look "ahead" a bit on the phi value). If there are only environment
// uses, the value is dropped altogether, since the computations have no effect.
if (induction_range_.CanGenerateLastValue(instruction)) {
- ReplaceAllUses(instruction, induction_range_.GenerateLastValue(instruction, graph_, block));
+ HInstruction* replacement = induction_range_.GenerateLastValue(instruction, graph_, block);
+ const HUseList<HInstruction*>& uses = instruction->GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end;) {
+ HInstruction* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment before replacing
+ if (iset_->find(user) == iset_->end()) { // not excluded?
+ user->ReplaceInput(replacement, index);
+ induction_range_.Replace(user, instruction, replacement); // update induction
+ }
+ }
+ const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
+ for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
+ HEnvironment* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment before replacing
+ if (iset_->find(user->GetHolder()) == iset_->end()) { // not excluded?
+ user->RemoveAsUserOfInput(index);
+ user->SetRawEnvAt(index, replacement);
+ replacement->AddEnvUseAt(user, index);
+ }
+ }
+ induction_simplication_count_++;
return true;
}
return false;
}
+void HLoopOptimization::RemoveDeadInstructions(const HInstructionList& list) {
+ for (HBackwardInstructionIterator i(list); !i.Done(); i.Advance()) {
+ HInstruction* instruction = i.Current();
+ if (instruction->IsDeadAndRemovable()) {
+ simplified_ = true;
+ instruction->GetBlock()->RemoveInstructionOrPhi(instruction);
+ }
+ }
+}
+
} // namespace art
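
At source level, SimplifyInnerLoop now eliminates two loop shapes instead of one: the previously handled empty loop, and a loop whose trip count tc is statically 1, whose body gets unrolled into the preheader with the header phi replaced by its initial value. A sketch of the effect (Body is a placeholder, not ART code):

    void Body(int i);

    int BeforeOptimization(int n) {
      int i = 0;
      for (; i < n; ++i) { }                // empty: no side effects besides iteration
      for (int j = 0; j < 1; ++j) Body(j);  // trivial: tc == 1
      return i;                             // index used after the loop
    }

    int AfterOptimization(int n) {
      Body(0);   // single-trip loop unrolled; the body sees the phi's initial value
      return n;  // empty loop removed; the index use gets the last value (n, for n >= 0)
    }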
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 0f05b24c37..9ddab4150c 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -60,19 +60,21 @@ class HLoopOptimization : public HOptimization {
void TraverseLoopsInnerToOuter(LoopNode* node);
+ // Simplification.
void SimplifyInduction(LoopNode* node);
void SimplifyBlocks(LoopNode* node);
- void RemoveIfEmptyInnerLoop(LoopNode* node);
+ bool SimplifyInnerLoop(LoopNode* node);
+ // Helpers.
bool IsPhiInduction(HPhi* phi);
bool IsEmptyHeader(HBasicBlock* block);
bool IsEmptyBody(HBasicBlock* block);
-
bool IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
HInstruction* instruction,
+ bool collect_loop_uses,
/*out*/ int32_t* use_count);
- void ReplaceAllUses(HInstruction* instruction, HInstruction* replacement);
bool TryReplaceWithLastValue(HInstruction* instruction, HBasicBlock* block);
+ void RemoveDeadInstructions(const HInstructionList& list);
// Range information based on prior induction variable analysis.
InductionVarRange induction_range_;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index a599c2aa84..d15145e673 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1853,6 +1853,14 @@ void HBasicBlock::DisconnectAndDelete() {
SetGraph(nullptr);
}
+void HBasicBlock::MergeInstructionsWith(HBasicBlock* other) {
+ DCHECK(EndsWithControlFlowInstruction());
+ RemoveInstruction(GetLastInstruction());
+ instructions_.Add(other->GetInstructions());
+ other->instructions_.SetBlockOfInstructions(this);
+ other->instructions_.Clear();
+}
+
void HBasicBlock::MergeWith(HBasicBlock* other) {
DCHECK_EQ(GetGraph(), other->GetGraph());
DCHECK(ContainsElement(dominated_blocks_, other));
@@ -1861,11 +1869,7 @@ void HBasicBlock::MergeWith(HBasicBlock* other) {
DCHECK(other->GetPhis().IsEmpty());
// Move instructions from `other` to `this`.
- DCHECK(EndsWithControlFlowInstruction());
- RemoveInstruction(GetLastInstruction());
- instructions_.Add(other->GetInstructions());
- other->instructions_.SetBlockOfInstructions(this);
- other->instructions_.Clear();
+ MergeInstructionsWith(other);
// Remove `other` from the loops it is included in.
for (HLoopInformationOutwardIterator it(*other); !it.Done(); it.Advance()) {
@@ -2387,6 +2391,14 @@ bool HInvoke::NeedsEnvironment() const {
return !opt.GetDoesNotNeedEnvironment();
}
+const DexFile& HInvokeStaticOrDirect::GetDexFileForPcRelativeDexCache() const {
+ ArtMethod* caller = GetEnvironment()->GetMethod();
+ ScopedObjectAccess soa(Thread::Current());
+ // `caller` is null for a top-level graph representing a method whose declaring
+ // class was not resolved.
+ return caller == nullptr ? GetBlock()->GetGraph()->GetDexFile() : *caller->GetDexFile();
+}
+
bool HInvokeStaticOrDirect::NeedsDexCacheOfDeclaringClass() const {
if (GetMethodLoadKind() != MethodLoadKind::kDexCacheViaMethod) {
return false;
@@ -2430,17 +2442,6 @@ std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckReq
}
}
-// Helper for InstructionDataEquals to fetch the mirror Class out
-// from a kJitTableAddress LoadClass kind.
-// NO_THREAD_SAFETY_ANALYSIS because even though we're accessing
-// mirrors, they are stored in a variable size handle scope which is always
-// visited during a pause. Also, the only caller of this helper
-// only uses the mirror for pointer comparison.
-static inline mirror::Class* AsMirrorInternal(uint64_t address)
- NO_THREAD_SAFETY_ANALYSIS {
- return reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr();
-}
-
bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
const HLoadClass* other_load_class = other->AsLoadClass();
// TODO: To allow GVN for HLoadClass from different dex files, we should compare the type
@@ -2451,11 +2452,12 @@ bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
}
switch (GetLoadKind()) {
case LoadKind::kBootImageAddress:
- return GetAddress() == other_load_class->GetAddress();
- case LoadKind::kJitTableAddress:
- return AsMirrorInternal(GetAddress()) == AsMirrorInternal(other_load_class->GetAddress());
+ case LoadKind::kJitTableAddress: {
+ ScopedObjectAccess soa(Thread::Current());
+ return GetClass().Get() == other_load_class->GetClass().Get();
+ }
default:
- DCHECK(HasTypeReference(GetLoadKind()) || HasDexCacheReference(GetLoadKind()));
+ DCHECK(HasTypeReference(GetLoadKind()));
return IsSameDexFile(GetDexFile(), other_load_class->GetDexFile());
}
}
@@ -2486,10 +2488,10 @@ std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs) {
return os << "BootImageLinkTimePcRelative";
case HLoadClass::LoadKind::kBootImageAddress:
return os << "BootImageAddress";
+ case HLoadClass::LoadKind::kBssEntry:
+ return os << "BssEntry";
case HLoadClass::LoadKind::kJitTableAddress:
return os << "JitTableAddress";
- case HLoadClass::LoadKind::kDexCachePcRelative:
- return os << "DexCachePcRelative";
case HLoadClass::LoadKind::kDexCacheViaMethod:
return os << "DexCacheViaMethod";
default:
@@ -2506,16 +2508,18 @@ bool HLoadString::InstructionDataEquals(const HInstruction* other) const {
GetPackedFields() != other_load_string->GetPackedFields()) {
return false;
}
- LoadKind load_kind = GetLoadKind();
- if (HasAddress(load_kind)) {
- return GetAddress() == other_load_string->GetAddress();
- } else {
- DCHECK(HasStringReference(load_kind)) << load_kind;
- return IsSameDexFile(GetDexFile(), other_load_string->GetDexFile());
+ switch (GetLoadKind()) {
+ case LoadKind::kBootImageAddress:
+ case LoadKind::kJitTableAddress: {
+ ScopedObjectAccess soa(Thread::Current());
+ return GetString().Get() == other_load_string->GetString().Get();
+ }
+ default:
+ return IsSameDexFile(GetDexFile(), other_load_string->GetDexFile());
}
}
-void HLoadString::SetLoadKindInternal(LoadKind load_kind) {
+void HLoadString::SetLoadKind(LoadKind load_kind) {
// Once sharpened, the load kind should not be changed again.
DCHECK_EQ(GetLoadKind(), LoadKind::kDexCacheViaMethod);
SetPackedField<LoadKindField>(load_kind);
@@ -2540,10 +2544,10 @@ std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs) {
return os << "BootImageAddress";
case HLoadString::LoadKind::kBssEntry:
return os << "BssEntry";
- case HLoadString::LoadKind::kDexCacheViaMethod:
- return os << "DexCacheViaMethod";
case HLoadString::LoadKind::kJitTableAddress:
return os << "JitTableAddress";
+ case HLoadString::LoadKind::kDexCacheViaMethod:
+ return os << "DexCacheViaMethod";
default:
LOG(FATAL) << "Unknown HLoadString::LoadKind: " << static_cast<int>(rhs);
UNREACHABLE();
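
Note the ScopedObjectAccess in both equality checks above: Handle<>::Get() dereferences a GC root, which is only safe while the mutator lock is held. A minimal sketch of the pattern:

    // Comparing two cached classes for GVN purposes; the lock scope makes the
    // raw mirror::Class* comparison safe against a moving collector.
    bool SameClass(Handle<mirror::Class> a, Handle<mirror::Class> b) {
      ScopedObjectAccess soa(Thread::Current());
      return a.Get() == b.Get();
    }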
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8c64d25aee..a2980dca20 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -171,6 +171,7 @@ class HInstructionList : public ValueObject {
friend class HGraph;
friend class HInstruction;
friend class HInstructionIterator;
+ friend class HInstructionIteratorHandleChanges;
friend class HBackwardInstructionIterator;
DISALLOW_COPY_AND_ASSIGN(HInstructionList);
@@ -1096,6 +1097,9 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
// with a control flow instruction).
void ReplaceWith(HBasicBlock* other);
+ // Merges the instructions of `other` at the end of `this`.
+ void MergeInstructionsWith(HBasicBlock* other);
+
// Merge `other` at the end of `this`. This method updates loops, reverse post
// order, links to predecessors, successors, dominators and deletes the block
// from the graph. The two blocks must be successive, i.e. `this` the only
@@ -1290,6 +1294,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(InvokeInterface, Invoke) \
M(InvokeStaticOrDirect, Invoke) \
M(InvokeVirtual, Invoke) \
+ M(InvokePolymorphic, Invoke) \
M(LessThan, Condition) \
M(LessThanOrEqual, Condition) \
M(LoadClass, Instruction) \
@@ -1719,28 +1724,22 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
public:
HEnvironment(ArenaAllocator* arena,
size_t number_of_vregs,
- const DexFile& dex_file,
- uint32_t method_idx,
+ ArtMethod* method,
uint32_t dex_pc,
- InvokeType invoke_type,
HInstruction* holder)
: vregs_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentVRegs)),
locations_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentLocations)),
parent_(nullptr),
- dex_file_(dex_file),
- method_idx_(method_idx),
+ method_(method),
dex_pc_(dex_pc),
- invoke_type_(invoke_type),
holder_(holder) {
}
HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy, HInstruction* holder)
: HEnvironment(arena,
to_copy.Size(),
- to_copy.GetDexFile(),
- to_copy.GetMethodIdx(),
+ to_copy.GetMethod(),
to_copy.GetDexPc(),
- to_copy.GetInvokeType(),
holder) {}
void SetAndCopyParentChain(ArenaAllocator* allocator, HEnvironment* parent) {
@@ -1789,16 +1788,8 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
return dex_pc_;
}
- uint32_t GetMethodIdx() const {
- return method_idx_;
- }
-
- InvokeType GetInvokeType() const {
- return invoke_type_;
- }
-
- const DexFile& GetDexFile() const {
- return dex_file_;
+ ArtMethod* GetMethod() const {
+ return method_;
}
HInstruction* GetHolder() const {
@@ -1814,10 +1805,8 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
ArenaVector<HUserRecord<HEnvironment*>> vregs_;
ArenaVector<Location> locations_;
HEnvironment* parent_;
- const DexFile& dex_file_;
- const uint32_t method_idx_;
+ ArtMethod* method_;
const uint32_t dex_pc_;
- const InvokeType invoke_type_;
// The instruction that holds this environment.
HInstruction* const holder_;
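
With the (dex file, method index, invoke type) triple collapsed into a single ArtMethod*, constructing an environment takes two fewer arguments. A construction sketch mirroring the nodes_test.cc updates further down in this change (holder is a placeholder for the instruction owning the environment):

    HEnvironment* environment = new (&allocator) HEnvironment(
        &allocator, /* number_of_vregs */ 1, graph->GetArtMethod(), /* dex_pc */ 0, holder);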
@@ -2312,6 +2301,9 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
};
std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs);
+// Iterates over the instructions while saving the next instruction, so that
+// the current instruction can safely be removed from the list by the user
+// of this iterator.
class HInstructionIterator : public ValueObject {
public:
explicit HInstructionIterator(const HInstructionList& instructions)
@@ -2333,6 +2325,28 @@ class HInstructionIterator : public ValueObject {
DISALLOW_COPY_AND_ASSIGN(HInstructionIterator);
};
+// Iterates over the instructions without saving the next instruction,
+// thereby tolerating any changes to the graph made by the user of this
+// iterator while iterating.
+class HInstructionIteratorHandleChanges : public ValueObject {
+ public:
+ explicit HInstructionIteratorHandleChanges(const HInstructionList& instructions)
+ : instruction_(instructions.first_instruction_) {
+ }
+
+ bool Done() const { return instruction_ == nullptr; }
+ HInstruction* Current() const { return instruction_; }
+ void Advance() {
+ instruction_ = instruction_->GetNext();
+ }
+
+ private:
+ HInstruction* instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInstructionIteratorHandleChanges);
+};
+
+
class HBackwardInstructionIterator : public ValueObject {
public:
explicit HBackwardInstructionIterator(const HInstructionList& instructions)
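
The difference between the two forward iterators matters for visitors that mutate the list: the default iterator caches the next instruction so the current one may be removed, while the new HandleChanges variant re-reads GetNext() on every Advance() and therefore observes insertions and removals made ahead of the cursor. A usage sketch (block and visitor are placeholders):

    // Safe when the visitor may add instructions after the current one and
    // expects the iteration to pick them up.
    for (HInstructionIteratorHandleChanges it(block->GetInstructions());
         !it.Done();
         it.Advance()) {
      it.Current()->Accept(&visitor);
    }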
@@ -3748,24 +3762,20 @@ class HCompare FINAL : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HCompare);
};
-class HNewInstance FINAL : public HExpression<2> {
+class HNewInstance FINAL : public HExpression<1> {
public:
HNewInstance(HInstruction* cls,
- HCurrentMethod* current_method,
uint32_t dex_pc,
dex::TypeIndex type_index,
const DexFile& dex_file,
- bool needs_access_check,
bool finalizable,
QuickEntrypointEnum entrypoint)
: HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC(), dex_pc),
type_index_(type_index),
dex_file_(dex_file),
entrypoint_(entrypoint) {
- SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
SetPackedFlag<kFlagFinalizable>(finalizable);
SetRawInputAt(0, cls);
- SetRawInputAt(1, current_method);
}
dex::TypeIndex GetTypeIndex() const { return type_index_; }
@@ -3777,8 +3787,9 @@ class HNewInstance FINAL : public HExpression<2> {
// Can throw errors when out-of-memory or if it's not instantiable/accessible.
bool CanThrow() const OVERRIDE { return true; }
- // Needs to call into runtime to make sure it's instantiable/accessible.
- bool NeedsAccessCheck() const { return GetPackedFlag<kFlagNeedsAccessCheck>(); }
+ bool NeedsChecks() const {
+ return entrypoint_ == kQuickAllocObjectWithChecks;
+ }
bool IsFinalizable() const { return GetPackedFlag<kFlagFinalizable>(); }
@@ -3790,13 +3801,21 @@ class HNewInstance FINAL : public HExpression<2> {
entrypoint_ = entrypoint;
}
+ HLoadClass* GetLoadClass() const {
+ HInstruction* input = InputAt(0);
+ if (input->IsClinitCheck()) {
+ input = input->InputAt(0);
+ }
+ DCHECK(input->IsLoadClass());
+ return input->AsLoadClass();
+ }
+
bool IsStringAlloc() const;
DECLARE_INSTRUCTION(NewInstance);
private:
- static constexpr size_t kFlagNeedsAccessCheck = kNumberOfExpressionPackedBits;
- static constexpr size_t kFlagFinalizable = kFlagNeedsAccessCheck + 1;
+ static constexpr size_t kFlagFinalizable = kNumberOfExpressionPackedBits;
static constexpr size_t kNumberOfNewInstancePackedBits = kFlagFinalizable + 1;
static_assert(kNumberOfNewInstancePackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
@@ -3842,7 +3861,6 @@ class HInvoke : public HVariableInputSizeInstruction {
Primitive::Type GetType() const OVERRIDE { return GetPackedField<ReturnTypeField>(); }
uint32_t GetDexMethodIndex() const { return dex_method_index_; }
- const DexFile& GetDexFile() const { return GetEnvironment()->GetDexFile(); }
InvokeType GetInvokeType() const {
return GetPackedField<InvokeTypeField>();
@@ -3959,6 +3977,28 @@ class HInvokeUnresolved FINAL : public HInvoke {
DISALLOW_COPY_AND_ASSIGN(HInvokeUnresolved);
};
+class HInvokePolymorphic FINAL : public HInvoke {
+ public:
+ HInvokePolymorphic(ArenaAllocator* arena,
+ uint32_t number_of_arguments,
+ Primitive::Type return_type,
+ uint32_t dex_pc,
+ uint32_t dex_method_index)
+ : HInvoke(arena,
+ number_of_arguments,
+ 0u /* number_of_other_inputs */,
+ return_type,
+ dex_pc,
+ dex_method_index,
+ nullptr,
+ kVirtual) {}
+
+ DECLARE_INSTRUCTION(InvokePolymorphic);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HInvokePolymorphic);
+};
+
class HInvokeStaticOrDirect FINAL : public HInvoke {
public:
// Requirements of this method call regarding the class
@@ -4140,6 +4180,8 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
return dispatch_info_.method_load_data;
}
+ const DexFile& GetDexFileForPcRelativeDexCache() const;
+
ClinitCheckRequirement GetClinitCheckRequirement() const {
return GetPackedField<ClinitCheckRequirementField>();
}
@@ -4322,23 +4364,12 @@ class HNeg FINAL : public HUnaryOperation {
class HNewArray FINAL : public HExpression<2> {
public:
- HNewArray(HInstruction* length,
- HCurrentMethod* current_method,
- uint32_t dex_pc,
- dex::TypeIndex type_index,
- const DexFile& dex_file,
- QuickEntrypointEnum entrypoint)
- : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC(), dex_pc),
- type_index_(type_index),
- dex_file_(dex_file),
- entrypoint_(entrypoint) {
- SetRawInputAt(0, length);
- SetRawInputAt(1, current_method);
+ HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc)
+ : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC(), dex_pc) {
+ SetRawInputAt(0, cls);
+ SetRawInputAt(1, length);
}
- dex::TypeIndex GetTypeIndex() const { return type_index_; }
- const DexFile& GetDexFile() const { return dex_file_; }
-
// Calls runtime so needs an environment.
bool NeedsEnvironment() const OVERRIDE { return true; }
@@ -4347,15 +4378,18 @@ class HNewArray FINAL : public HExpression<2> {
bool CanBeNull() const OVERRIDE { return false; }
- QuickEntrypointEnum GetEntrypoint() const { return entrypoint_; }
+ HLoadClass* GetLoadClass() const {
+ DCHECK(InputAt(0)->IsLoadClass());
+ return InputAt(0)->AsLoadClass();
+ }
+
+ HInstruction* GetLength() const {
+ return InputAt(1);
+ }
DECLARE_INSTRUCTION(NewArray);
private:
- const dex::TypeIndex type_index_;
- const DexFile& dex_file_;
- const QuickEntrypointEnum entrypoint_;
-
DISALLOW_COPY_AND_ASSIGN(HNewArray);
};
@@ -5056,60 +5090,62 @@ class HNullCheck FINAL : public HExpression<1> {
DISALLOW_COPY_AND_ASSIGN(HNullCheck);
};
+// Embeds an ArtField and all the information required by the compiler. We cache
+// that information to avoid requiring the mutator lock every time we need it.
class FieldInfo : public ValueObject {
public:
- FieldInfo(MemberOffset field_offset,
+ FieldInfo(ArtField* field,
+ MemberOffset field_offset,
Primitive::Type field_type,
bool is_volatile,
uint32_t index,
uint16_t declaring_class_def_index,
- const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache)
- : field_offset_(field_offset),
+ const DexFile& dex_file)
+ : field_(field),
+ field_offset_(field_offset),
field_type_(field_type),
is_volatile_(is_volatile),
index_(index),
declaring_class_def_index_(declaring_class_def_index),
- dex_file_(dex_file),
- dex_cache_(dex_cache) {}
+ dex_file_(dex_file) {}
+ ArtField* GetField() const { return field_; }
MemberOffset GetFieldOffset() const { return field_offset_; }
Primitive::Type GetFieldType() const { return field_type_; }
uint32_t GetFieldIndex() const { return index_; }
uint16_t GetDeclaringClassDefIndex() const { return declaring_class_def_index_;}
const DexFile& GetDexFile() const { return dex_file_; }
bool IsVolatile() const { return is_volatile_; }
- Handle<mirror::DexCache> GetDexCache() const { return dex_cache_; }
private:
+ ArtField* const field_;
const MemberOffset field_offset_;
const Primitive::Type field_type_;
const bool is_volatile_;
const uint32_t index_;
const uint16_t declaring_class_def_index_;
const DexFile& dex_file_;
- const Handle<mirror::DexCache> dex_cache_;
};
class HInstanceFieldGet FINAL : public HExpression<1> {
public:
HInstanceFieldGet(HInstruction* value,
+ ArtField* field,
Primitive::Type field_type,
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
uint16_t declaring_class_def_index,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset,
+ field_info_(field,
+ field_offset,
field_type,
is_volatile,
field_idx,
declaring_class_def_index,
- dex_file,
- dex_cache) {
+ dex_file) {
SetRawInputAt(0, value);
}
@@ -5145,22 +5181,22 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
public:
HInstanceFieldSet(HInstruction* object,
HInstruction* value,
+ ArtField* field,
Primitive::Type field_type,
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
uint16_t declaring_class_def_index,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset,
+ field_info_(field,
+ field_offset,
field_type,
is_volatile,
field_idx,
declaring_class_def_index,
- dex_file,
- dex_cache) {
+ dex_file) {
SetPackedFlag<kFlagValueCanBeNull>(true);
SetRawInputAt(0, object);
SetRawInputAt(1, value);
@@ -5397,10 +5433,10 @@ class HBoundsCheck FINAL : public HExpression<2> {
HBoundsCheck(HInstruction* index,
HInstruction* length,
uint32_t dex_pc,
- uint32_t string_char_at_method_index = DexFile::kDexNoIndex)
- : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc),
- string_char_at_method_index_(string_char_at_method_index) {
+ bool string_char_at = false)
+ : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
DCHECK_EQ(Primitive::kPrimInt, Primitive::PrimitiveKind(index->GetType()));
+ SetPackedFlag<kFlagIsStringCharAt>(string_char_at);
SetRawInputAt(0, index);
SetRawInputAt(1, length);
}
@@ -5414,22 +5450,14 @@ class HBoundsCheck FINAL : public HExpression<2> {
bool CanThrow() const OVERRIDE { return true; }
- bool IsStringCharAt() const { return GetStringCharAtMethodIndex() != DexFile::kDexNoIndex; }
- uint32_t GetStringCharAtMethodIndex() const { return string_char_at_method_index_; }
+ bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); }
HInstruction* GetIndex() const { return InputAt(0); }
DECLARE_INSTRUCTION(BoundsCheck);
private:
- // We treat a String as an array, creating the HBoundsCheck from String.charAt()
- // intrinsic in the instruction simplifier. We want to include the String.charAt()
- // in the stack trace if we actually throw the StringIndexOutOfBoundsException,
- // so we need to create an HEnvironment which will be translated to an InlineInfo
- // indicating the extra stack frame. Since we add this HEnvironment quite late,
- // in the PrepareForRegisterAllocation pass, we need to remember the method index
- // from the invoke as we don't want to look again at the dex bytecode.
- uint32_t string_char_at_method_index_; // DexFile::kDexNoIndex if regular array.
+ static constexpr size_t kFlagIsStringCharAt = kNumberOfExpressionPackedBits;
DISALLOW_COPY_AND_ASSIGN(HBoundsCheck);
};
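
The String.charAt() marker thus shrinks from a full method index with a kDexNoIndex sentinel to a single packed bit, since PrepareForRegisterAllocation (changed below) can now look the method up via WellKnownClasses instead of remembering its index. Before and after, side by side:

    // Before: sentinel comparison against a dedicated uint32_t member.
    bool IsStringCharAt() const { return string_char_at_method_index_ != DexFile::kDexNoIndex; }

    // After: one bit in the instruction's existing packed-field storage.
    bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); }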
@@ -5497,14 +5525,13 @@ class HLoadClass FINAL : public HInstruction {
// GetIncludePatchInformation().
kBootImageAddress,
+ // Load from an entry in the .bss section using a PC-relative load.
+ // Used for classes outside boot image when .bss is accessible with a PC-relative load.
+ kBssEntry,
+
// Load from the root table associated with the JIT compiled method.
kJitTableAddress,
- // Load from resolved types array in the dex cache using a PC-relative load.
- // Used for classes outside boot image when we know that we can access
- // the dex cache arrays using a PC-relative load.
- kDexCachePcRelative,
-
// Load from resolved types array accessed through the class loaded from
// the compiled method's own ArtMethod*. This is the default access type when
// all other types are unavailable.
@@ -5516,6 +5543,7 @@ class HLoadClass FINAL : public HInstruction {
HLoadClass(HCurrentMethod* current_method,
dex::TypeIndex type_index,
const DexFile& dex_file,
+ Handle<mirror::Class> klass,
bool is_referrers_class,
uint32_t dex_pc,
bool needs_access_check)
@@ -5523,6 +5551,7 @@ class HLoadClass FINAL : public HInstruction {
special_input_(HUserRecord<HInstruction*>(current_method)),
type_index_(type_index),
dex_file_(dex_file),
+ klass_(klass),
loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) {
// Referrers class should not need access check. We never inline unverified
// methods so we can't possibly end up in this situation.
@@ -5531,14 +5560,11 @@ class HLoadClass FINAL : public HInstruction {
SetPackedField<LoadKindField>(
is_referrers_class ? LoadKind::kReferrersClass : LoadKind::kDexCacheViaMethod);
SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
- SetPackedFlag<kFlagIsInDexCache>(false);
SetPackedFlag<kFlagIsInBootImage>(false);
SetPackedFlag<kFlagGenerateClInitCheck>(false);
}
- void SetLoadKindWithAddress(LoadKind load_kind, uint64_t address) {
- DCHECK(HasAddress(load_kind));
- load_data_.address = address;
+ void SetLoadKind(LoadKind load_kind) {
SetLoadKindInternal(load_kind);
}
@@ -5551,15 +5577,6 @@ class HLoadClass FINAL : public HInstruction {
SetLoadKindInternal(load_kind);
}
- void SetLoadKindWithDexCacheReference(LoadKind load_kind,
- const DexFile& dex_file,
- uint32_t element_index) {
- DCHECK(HasDexCacheReference(load_kind));
- DCHECK(IsSameDexFile(dex_file_, dex_file));
- load_data_.dex_cache_element_index = element_index;
- SetLoadKindInternal(load_kind);
- }
-
LoadKind GetLoadKind() const {
return GetPackedField<LoadKindField>();
}
@@ -5584,13 +5601,21 @@ class HLoadClass FINAL : public HInstruction {
}
bool CanCallRuntime() const {
- return MustGenerateClinitCheck() ||
- (!IsReferrersClass() && !IsInDexCache()) ||
- NeedsAccessCheck();
+ return NeedsAccessCheck() ||
+ MustGenerateClinitCheck() ||
+ GetLoadKind() == LoadKind::kDexCacheViaMethod ||
+ GetLoadKind() == LoadKind::kBssEntry;
}
bool CanThrow() const OVERRIDE {
- return CanCallRuntime();
+ return NeedsAccessCheck() ||
+ MustGenerateClinitCheck() ||
+ // If the class is in the boot image, the lookup in the runtime call cannot throw.
+ // This keeps CanThrow() consistent between non-PIC (using kBootImageAddress) and
+ // PIC and subsequently avoids a DCE behavior dependency on the PIC option.
+ ((GetLoadKind() == LoadKind::kDexCacheViaMethod ||
+ GetLoadKind() == LoadKind::kBssEntry) &&
+ !IsInBootImage());
}
ReferenceTypeInfo GetLoadedClassRTI() {
@@ -5606,15 +5631,8 @@ class HLoadClass FINAL : public HInstruction {
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
- uint32_t GetDexCacheElementOffset() const;
-
- uint64_t GetAddress() const {
- DCHECK(HasAddress(GetLoadKind()));
- return load_data_.address;
- }
-
bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
- return !IsReferrersClass();
+ return GetLoadKind() == LoadKind::kDexCacheViaMethod;
}
static SideEffects SideEffectsForArchRuntimeCalls() {
@@ -5623,17 +5641,9 @@ class HLoadClass FINAL : public HInstruction {
bool IsReferrersClass() const { return GetLoadKind() == LoadKind::kReferrersClass; }
bool NeedsAccessCheck() const { return GetPackedFlag<kFlagNeedsAccessCheck>(); }
- bool IsInDexCache() const { return GetPackedFlag<kFlagIsInDexCache>(); }
bool IsInBootImage() const { return GetPackedFlag<kFlagIsInBootImage>(); }
bool MustGenerateClinitCheck() const { return GetPackedFlag<kFlagGenerateClInitCheck>(); }
- void MarkInDexCache() {
- SetPackedFlag<kFlagIsInDexCache>(true);
- DCHECK(!NeedsEnvironment());
- RemoveEnvironment();
- SetSideEffects(SideEffects::None());
- }
-
void MarkInBootImage() {
SetPackedFlag<kFlagIsInBootImage>(true);
}
@@ -5650,12 +5660,15 @@ class HLoadClass FINAL : public HInstruction {
return Primitive::kPrimNot;
}
+ Handle<mirror::Class> GetClass() const {
+ return klass_;
+ }
+
DECLARE_INSTRUCTION(LoadClass);
private:
static constexpr size_t kFlagNeedsAccessCheck = kNumberOfGenericPackedBits;
- static constexpr size_t kFlagIsInDexCache = kFlagNeedsAccessCheck + 1;
- static constexpr size_t kFlagIsInBootImage = kFlagIsInDexCache + 1;
+ static constexpr size_t kFlagIsInBootImage = kFlagNeedsAccessCheck + 1;
// Whether this instruction must generate the initialization check.
// Used for code generation.
static constexpr size_t kFlagGenerateClInitCheck = kFlagIsInBootImage + 1;
@@ -5667,35 +5680,24 @@ class HLoadClass FINAL : public HInstruction {
using LoadKindField = BitField<LoadKind, kFieldLoadKind, kFieldLoadKindSize>;
static bool HasTypeReference(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageLinkTimeAddress ||
+ return load_kind == LoadKind::kReferrersClass ||
+ load_kind == LoadKind::kBootImageLinkTimeAddress ||
load_kind == LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == LoadKind::kDexCacheViaMethod ||
- load_kind == LoadKind::kReferrersClass;
- }
-
- static bool HasAddress(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageAddress ||
- load_kind == LoadKind::kJitTableAddress;
- }
-
- static bool HasDexCacheReference(LoadKind load_kind) {
- return load_kind == LoadKind::kDexCachePcRelative;
+ load_kind == LoadKind::kBssEntry ||
+ load_kind == LoadKind::kDexCacheViaMethod;
}
void SetLoadKindInternal(LoadKind load_kind);
// The special input is the HCurrentMethod for kDexCacheViaMethod or kReferrersClass.
// For other load kinds it's empty or possibly some architecture-specific instruction
- // for PC-relative loads, i.e. kDexCachePcRelative or kBootImageLinkTimePcRelative.
+ // for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
const dex::TypeIndex type_index_;
const DexFile& dex_file_;
- union {
- uint32_t dex_cache_element_index; // Only for dex cache reference.
- uint64_t address; // Up to 64-bit, needed for kJitTableAddress on 64-bit targets.
- } load_data_;
+ Handle<mirror::Class> klass_;
ReferenceTypeInfo loaded_class_rti_;
@@ -5704,19 +5706,13 @@ class HLoadClass FINAL : public HInstruction {
std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs);
// Note: defined outside class to see operator<<(., HLoadClass::LoadKind).
-inline uint32_t HLoadClass::GetDexCacheElementOffset() const {
- DCHECK(HasDexCacheReference(GetLoadKind())) << GetLoadKind();
- return load_data_.dex_cache_element_index;
-}
-
-// Note: defined outside class to see operator<<(., HLoadClass::LoadKind).
inline void HLoadClass::AddSpecialInput(HInstruction* special_input) {
// The special input is used for PC-relative loads on some architectures,
// including literal pool loads, which are PC-relative too.
DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
- GetLoadKind() == LoadKind::kDexCachePcRelative ||
GetLoadKind() == LoadKind::kBootImageLinkTimeAddress ||
- GetLoadKind() == LoadKind::kBootImageAddress) << GetLoadKind();
+ GetLoadKind() == LoadKind::kBootImageAddress ||
+ GetLoadKind() == LoadKind::kBssEntry) << GetLoadKind();
DCHECK(special_input_.GetInstruction() == nullptr);
special_input_ = HUserRecord<HInstruction*>(special_input);
special_input->AddUseAt(this, 0);
@@ -5744,15 +5740,15 @@ class HLoadString FINAL : public HInstruction {
// Used for strings outside boot image when .bss is accessible with a PC-relative load.
kBssEntry,
+ // Load from the root table associated with the JIT compiled method.
+ kJitTableAddress,
+
// Load from resolved strings array accessed through the class loaded from
// the compiled method's own ArtMethod*. This is the default access type when
// all other types are unavailable.
kDexCacheViaMethod,
- // Load from the root table associated with the JIT compiled method.
- kJitTableAddress,
-
- kLast = kJitTableAddress,
+ kLast = kDexCacheViaMethod,
};
HLoadString(HCurrentMethod* current_method,
@@ -5761,39 +5757,31 @@ class HLoadString FINAL : public HInstruction {
uint32_t dex_pc)
: HInstruction(SideEffectsForArchRuntimeCalls(), dex_pc),
special_input_(HUserRecord<HInstruction*>(current_method)),
- string_index_(string_index) {
+ string_index_(string_index),
+ dex_file_(dex_file) {
SetPackedField<LoadKindField>(LoadKind::kDexCacheViaMethod);
- load_data_.dex_file_ = &dex_file;
- }
-
- void SetLoadKindWithAddress(LoadKind load_kind, uint64_t address) {
- DCHECK(HasAddress(load_kind));
- load_data_.address = address;
- SetLoadKindInternal(load_kind);
}
- void SetLoadKindWithStringReference(LoadKind load_kind,
- const DexFile& dex_file,
- dex::StringIndex string_index) {
- DCHECK(HasStringReference(load_kind));
- load_data_.dex_file_ = &dex_file;
- string_index_ = string_index;
- SetLoadKindInternal(load_kind);
- }
+ void SetLoadKind(LoadKind load_kind);
LoadKind GetLoadKind() const {
return GetPackedField<LoadKindField>();
}
- const DexFile& GetDexFile() const;
+ const DexFile& GetDexFile() const {
+ return dex_file_;
+ }
dex::StringIndex GetStringIndex() const {
return string_index_;
}
- uint64_t GetAddress() const {
- DCHECK(HasAddress(GetLoadKind()));
- return load_data_.address;
+ Handle<mirror::String> GetString() const {
+ return string_;
+ }
+
+ void SetString(Handle<mirror::String> str) {
+ string_ = str;
}
bool CanBeMoved() const OVERRIDE { return true; }
@@ -5848,45 +5836,23 @@ class HLoadString FINAL : public HInstruction {
static_assert(kNumberOfLoadStringPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using LoadKindField = BitField<LoadKind, kFieldLoadKind, kFieldLoadKindSize>;
- static bool HasStringReference(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageLinkTimeAddress ||
- load_kind == LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == LoadKind::kBssEntry ||
- load_kind == LoadKind::kDexCacheViaMethod ||
- load_kind == LoadKind::kJitTableAddress;
- }
-
- static bool HasAddress(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageAddress;
- }
-
void SetLoadKindInternal(LoadKind load_kind);
// The special input is the HCurrentMethod for kDexCacheViaMethod.
// For other load kinds it's empty or possibly some architecture-specific instruction
- // for PC-relative loads, i.e. kDexCachePcRelative or kBootImageLinkTimePcRelative.
+ // for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
- // String index serves also as the hash code and it's also needed for slow-paths,
- // so it must not be overwritten with other load data.
dex::StringIndex string_index_;
+ const DexFile& dex_file_;
- union {
- const DexFile* dex_file_; // For string reference.
- uint64_t address; // Up to 64-bit, needed for kDexCacheAddress on 64-bit targets.
- } load_data_;
+ Handle<mirror::String> string_;
DISALLOW_COPY_AND_ASSIGN(HLoadString);
};
std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs);
// Note: defined outside class to see operator<<(., HLoadString::LoadKind).
-inline const DexFile& HLoadString::GetDexFile() const {
- DCHECK(HasStringReference(GetLoadKind())) << GetLoadKind();
- return *load_data_.dex_file_;
-}
-
-// Note: defined outside class to see operator<<(., HLoadString::LoadKind).
inline void HLoadString::AddSpecialInput(HInstruction* special_input) {
// The special input is used for PC-relative loads on some architectures,
// including literal pool loads, which are PC-relative too.
@@ -5926,7 +5892,10 @@ class HClinitCheck FINAL : public HExpression<1> {
bool CanThrow() const OVERRIDE { return true; }
- HLoadClass* GetLoadClass() const { return InputAt(0)->AsLoadClass(); }
+ HLoadClass* GetLoadClass() const {
+ DCHECK(InputAt(0)->IsLoadClass());
+ return InputAt(0)->AsLoadClass();
+ }
DECLARE_INSTRUCTION(ClinitCheck);
@@ -5937,22 +5906,22 @@ class HClinitCheck FINAL : public HExpression<1> {
class HStaticFieldGet FINAL : public HExpression<1> {
public:
HStaticFieldGet(HInstruction* cls,
+ ArtField* field,
Primitive::Type field_type,
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
uint16_t declaring_class_def_index,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset,
+ field_info_(field,
+ field_offset,
field_type,
is_volatile,
field_idx,
declaring_class_def_index,
- dex_file,
- dex_cache) {
+ dex_file) {
SetRawInputAt(0, cls);
}
@@ -5985,22 +5954,22 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
public:
HStaticFieldSet(HInstruction* cls,
HInstruction* value,
+ ArtField* field,
Primitive::Type field_type,
MemberOffset field_offset,
bool is_volatile,
uint32_t field_idx,
uint16_t declaring_class_def_index,
const DexFile& dex_file,
- Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
- field_info_(field_offset,
+ field_info_(field,
+ field_offset,
field_type,
is_volatile,
field_idx,
declaring_class_def_index,
- dex_file,
- dex_cache) {
+ dex_file) {
SetPackedFlag<kFlagValueCanBeNull>(true);
SetRawInputAt(0, cls);
SetRawInputAt(1, value);
@@ -6792,6 +6761,23 @@ inline void MakeRoomFor(ArenaVector<HBasicBlock*>* blocks,
std::copy_backward(blocks->begin() + after + 1u, blocks->begin() + old_size, blocks->end());
}
+/*
+ * Hunt "under the hood" of array lengths (leading to array references),
+ * null checks (also leading to array references), and new arrays
+ * (leading to the actual length). This makes it more likely that related
+ * instructions become directly comparable.
+ */
+inline HInstruction* HuntForDeclaration(HInstruction* instruction) {
+ while (instruction->IsArrayLength() ||
+ instruction->IsNullCheck() ||
+ instruction->IsNewArray()) {
+ instruction = instruction->IsNewArray()
+ ? instruction->AsNewArray()->GetLength()
+ : instruction->InputAt(0);
+ }
+ return instruction;
+}
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_H_
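
A usage sketch for the relocated HuntForDeclaration helper: for code like "int[] a = new int[n]; ... a.length ...", hunting from the HArrayLength walks ArrayLength -> (NullCheck ->) HNewArray and ends at the HNewArray's length input, i.e. the instruction defining n, so both sides of a bounds comparison can unify (the variable names below are placeholders):

    HInstruction* declared = HuntForDeclaration(array_length_instruction);
    bool length_is_n = (declared == n_instruction);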
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index 5d9a6528ca..7686ba851b 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -52,7 +52,7 @@ TEST(Node, RemoveInstruction) {
exit_block->AddInstruction(new (&allocator) HExit());
HEnvironment* environment = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic, null_check);
+ &allocator, 1, graph->GetArtMethod(), 0, null_check);
null_check->SetRawEnvironment(environment);
environment->SetRawEnvAt(0, parameter);
parameter->AddEnvUseAt(null_check->GetEnvironment(), 0);
@@ -137,7 +137,7 @@ TEST(Node, ParentEnvironment) {
ASSERT_TRUE(parameter1->GetUses().HasExactlyOneElement());
HEnvironment* environment = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic, with_environment);
+ &allocator, 1, graph->GetArtMethod(), 0, with_environment);
ArenaVector<HInstruction*> array(allocator.Adapter());
array.push_back(parameter1);
@@ -148,13 +148,13 @@ TEST(Node, ParentEnvironment) {
ASSERT_TRUE(parameter1->GetEnvUses().HasExactlyOneElement());
HEnvironment* parent1 = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic, nullptr);
+ &allocator, 1, graph->GetArtMethod(), 0, nullptr);
parent1->CopyFrom(array);
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 2u);
HEnvironment* parent2 = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic, nullptr);
+ &allocator, 1, graph->GetArtMethod(), 0, nullptr);
parent2->CopyFrom(array);
parent1->SetAndCopyParentChain(&allocator, parent2);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 4bf5b080a7..297500b12f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1205,7 +1205,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
}
MaybeRecordStat(MethodCompilationStat::kCompiled);
codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item);
- codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data, dex_cache);
+ codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);
const void* code = code_cache->CommitCode(
self,
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index e321b9e3aa..a0fdde169d 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -62,8 +62,9 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
switch (load_kind) {
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadClass::LoadKind::kBootImageAddress:
+ case HLoadClass::LoadKind::kBssEntry:
// Add a base register for PC-relative literals on R2.
InitializePCRelativeBasePointer();
load_class->AddSpecialInput(base_);
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index b1fdb1792d..2befc8ca4e 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -83,7 +83,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == HLoadClass::LoadKind::kDexCachePcRelative) {
+ load_kind == HLoadClass::LoadKind::kBssEntry) {
InitializePCRelativeBasePointer();
load_class->AddSpecialInput(base_);
}
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index f9ac3a0f72..efbaf6c221 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -16,6 +16,9 @@
#include "prepare_for_register_allocation.h"
+#include "jni_internal.h"
+#include "well_known_classes.h"
+
namespace art {
void PrepareForRegisterAllocation::Run() {
@@ -42,16 +45,12 @@ void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) {
if (check->IsStringCharAt()) {
// Add a fake environment for String.charAt() inline info as we want
// the exception to appear as being thrown from there.
- const DexFile& dex_file = check->GetEnvironment()->GetDexFile();
- DCHECK_STREQ(dex_file.PrettyMethod(check->GetStringCharAtMethodIndex()).c_str(),
- "char java.lang.String.charAt(int)");
+ ArtMethod* char_at_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
ArenaAllocator* arena = GetGraph()->GetArena();
HEnvironment* environment = new (arena) HEnvironment(arena,
/* number_of_vregs */ 0u,
- dex_file,
- check->GetStringCharAtMethodIndex(),
+ char_at_method,
/* dex_pc */ DexFile::kDexNoIndex,
- kVirtual,
check);
check->InsertRawEnvironment(environment);
}
@@ -134,39 +133,6 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
}
}
-void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) {
- HLoadClass* load_class = instruction->InputAt(0)->AsLoadClass();
- const bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse();
- // Change the entrypoint to kQuickAllocObject if either:
- // - the class is finalizable (only kQuickAllocObject handles finalizable classes),
- // - the class needs access checks (we do not know if it's finalizable),
- // - or the load class has only one use.
- if (instruction->IsFinalizable() || has_only_one_use || load_class->NeedsAccessCheck()) {
- instruction->SetEntrypoint(kQuickAllocObject);
- instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex().index_), 0);
- if (has_only_one_use) {
- // We've just removed the only use of the HLoadClass. Since we don't run DCE after this pass,
- // do it manually if possible.
- if (!load_class->CanThrow()) {
- // If the load class can not throw, it has no side effects and can be removed if there is
- // only one use.
- load_class->GetBlock()->RemoveInstruction(load_class);
- } else if (!instruction->GetEnvironment()->IsFromInlinedInvoke() &&
- CanMoveClinitCheck(load_class, instruction)) {
- // The allocation entry point that deals with access checks does not work with inlined
- // methods, so we need to check whether this allocation comes from an inlined method.
- // We also need to make the same check as for moving clinit check, whether the HLoadClass
- // has the clinit check responsibility or not (HLoadClass can throw anyway).
- // If it needed access checks, we delegate the access check to the allocation.
- if (load_class->NeedsAccessCheck()) {
- instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck);
- }
- load_class->GetBlock()->RemoveInstruction(load_class);
- }
- }
- }
-}
-
bool PrepareForRegisterAllocation::CanEmitConditionAt(HCondition* condition,
HInstruction* user) const {
if (condition->GetNext() != user) {
@@ -232,8 +198,7 @@ bool PrepareForRegisterAllocation::CanMoveClinitCheck(HInstruction* input,
return false;
}
if (user_environment->GetDexPc() != input_environment->GetDexPc() ||
- user_environment->GetMethodIdx() != input_environment->GetMethodIdx() ||
- !IsSameDexFile(user_environment->GetDexFile(), input_environment->GetDexFile())) {
+ user_environment->GetMethod() != input_environment->GetMethod()) {
return false;
}
user_environment = user_environment->GetParent();
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index a6791482a7..c128227654 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -44,7 +44,6 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
void VisitCondition(HCondition* condition) OVERRIDE;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
- void VisitNewInstance(HNewInstance* instruction) OVERRIDE;
bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 33b3875e3b..b02f2509ab 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -76,6 +76,7 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
worklist_(worklist),
is_first_run_(is_first_run) {}
+ void VisitDeoptimize(HDeoptimize* deopt) OVERRIDE;
void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE;
@@ -151,38 +152,6 @@ void ReferenceTypePropagation::Visit(HInstruction* instruction) {
instruction->Accept(&visitor);
}
-void ReferenceTypePropagation::Run() {
- worklist_.reserve(kDefaultWorklistSize);
-
- // To properly propagate type info we need to visit in the dominator-based order.
- // Reverse post order guarantees a node's dominators are visited first.
- // We take advantage of this order in `VisitBasicBlock`.
- for (HBasicBlock* block : graph_->GetReversePostOrder()) {
- VisitBasicBlock(block);
- }
-
- ProcessWorklist();
- ValidateTypes();
-}
-
-void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
- RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
- // Handle Phis first as there might be instructions in the same block who depend on them.
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- VisitPhi(it.Current()->AsPhi());
- }
-
- // Handle instructions.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- instr->Accept(&visitor);
- }
-
- // Add extra nodes to bound types.
- BoundTypeForIfNotNull(block);
- BoundTypeForIfInstanceOf(block);
-}
-
// Check if we should create a bound type for the given object at the specified
// position. Because of inlining, and because we run RTP more than once, we
// might have an HBoundType already. If we do, we should not create a new one.
@@ -225,6 +194,153 @@ static bool ShouldCreateBoundType(HInstruction* position,
return false;
}
+// Helper method to bound the type of `receiver` for all instructions dominated
+// by `start_block`, or `start_instruction` if `start_block` is null. The new
+// bound type will have its upper bound be `class_rti`.
+static void BoundTypeIn(HInstruction* receiver,
+ HBasicBlock* start_block,
+ HInstruction* start_instruction,
+ const ReferenceTypeInfo& class_rti) {
+ // We only need to bound the type if we have uses in the relevant block.
+ // So start with null and create the HBoundType lazily, only if it's needed.
+ HBoundType* bound_type = nullptr;
+ DCHECK(!receiver->IsLoadClass()) << "We should not replace HLoadClass instructions";
+ const HUseList<HInstruction*>& uses = receiver->GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
+ HInstruction* user = it->GetUser();
+ size_t index = it->GetIndex();
+ // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
+ ++it;
+ bool dominates = (start_instruction != nullptr)
+ ? start_instruction->StrictlyDominates(user)
+ : start_block->Dominates(user->GetBlock());
+ if (!dominates) {
+ continue;
+ }
+ if (bound_type == nullptr) {
+ ScopedObjectAccess soa(Thread::Current());
+ HInstruction* insert_point = (start_instruction != nullptr)
+ ? start_instruction->GetNext()
+ : start_block->GetFirstInstruction();
+ if (ShouldCreateBoundType(
+ insert_point, receiver, class_rti, start_instruction, start_block)) {
+ bound_type = new (receiver->GetBlock()->GetGraph()->GetArena()) HBoundType(receiver);
+ bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false);
+ start_block->InsertInstructionBefore(bound_type, insert_point);
+        // To comply with the RTP algorithm, don't type the bound type just yet; it
+        // will be handled in RTPVisitor::VisitBoundType.
+ } else {
+ // We already have a bound type on the position we would need to insert
+ // the new one. The existing bound type should dominate all the users
+ // (dchecked) so there's no need to continue.
+ break;
+ }
+ }
+ user->ReplaceInput(bound_type, index);
+ }
+ // If the receiver is a null check, also bound the type of the actual
+ // receiver.
+ if (receiver->IsNullCheck()) {
+ BoundTypeIn(receiver->InputAt(0), start_block, start_instruction, class_rti);
+ }
+}
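
BoundTypeIn factors out the use-rewriting loop that the two BoundTypeForIf* helpers below previously duplicated: scan the receiver's uses, and for each use dominated by the guarded region, lazily create a single narrowed alias and point the use at it. A minimal sketch of that move over a toy IR (simplified types, not ART's classes):

    #include <vector>

    struct Value;
    struct Use {
      Value** slot;    // the user's input slot holding the receiver
      bool dominated;  // stand-in for the StrictlyDominates/Dominates test
    };
    struct Value { std::vector<Use> uses; };

    // Redirect every dominated use of `receiver` to one lazily created
    // narrowed alias, mirroring the lazy HBoundType creation above.
    Value* NarrowDominatedUses(Value* receiver) {
      Value* bound = nullptr;
      for (Use& use : receiver->uses) {
        if (!use.dominated) continue;
        if (bound == nullptr) bound = new Value();  // created on first hit only
        *use.slot = bound;                          // ReplaceInput analogue
      }
      return bound;  // nullptr if no use needed narrowing
    }
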
+
+// Recognize the patterns:
+// if (obj.shadow$_klass_ == Foo.class) ...
+// deoptimize if (obj.shadow$_klass_ == Foo.class)
+static void BoundTypeForClassCheck(HInstruction* check) {
+ if (!check->IsIf() && !check->IsDeoptimize()) {
+ return;
+ }
+ HInstruction* compare = check->InputAt(0);
+ if (!compare->IsEqual() && !compare->IsNotEqual()) {
+ return;
+ }
+ HInstruction* input_one = compare->InputAt(0);
+ HInstruction* input_two = compare->InputAt(1);
+ HLoadClass* load_class = input_one->IsLoadClass()
+ ? input_one->AsLoadClass()
+ : input_two->AsLoadClass();
+ if (load_class == nullptr) {
+ return;
+ }
+
+ ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ if (!class_rti.IsValid()) {
+ // We have loaded an unresolved class. Don't bother bounding the type.
+ return;
+ }
+
+ HInstanceFieldGet* field_get = (load_class == input_one)
+ ? input_two->AsInstanceFieldGet()
+ : input_one->AsInstanceFieldGet();
+ if (field_get == nullptr) {
+ return;
+ }
+ HInstruction* receiver = field_get->InputAt(0);
+ ReferenceTypeInfo receiver_type = receiver->GetReferenceTypeInfo();
+ if (receiver_type.IsExact()) {
+ // If we already know the receiver type, don't bother updating its users.
+ return;
+ }
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0);
+ DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
+ if (field_get->GetFieldInfo().GetField() != field) {
+ return;
+ }
+ }
+
+ if (check->IsIf()) {
+ HBasicBlock* trueBlock = compare->IsEqual()
+ ? check->AsIf()->IfTrueSuccessor()
+ : check->AsIf()->IfFalseSuccessor();
+ BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti);
+ } else {
+ DCHECK(check->IsDeoptimize());
+ if (compare->IsEqual()) {
+ BoundTypeIn(receiver, check->GetBlock(), check, class_rti);
+ }
+ }
+}
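
Restating the recognized shape as a toy matcher may help: the check must be an If or Deoptimize whose condition is an (in)equality between an HLoadClass and a read of the receiver's shadow$_klass_ field. The enum and node layout below are illustrative only:

    enum class Op { kIf, kDeoptimize, kEqual, kNotEqual, kLoadClass, kFieldGet, kOther };
    struct Node { Op op; Node* in0 = nullptr; Node* in1 = nullptr; };

    // Returns the receiver whose type the guard lets us narrow, or nullptr
    // if the instruction does not have the expected shape.
    Node* MatchClassCheck(Node* check) {
      if (check->op != Op::kIf && check->op != Op::kDeoptimize) return nullptr;
      Node* cmp = check->in0;
      if (cmp->op != Op::kEqual && cmp->op != Op::kNotEqual) return nullptr;
      Node* load = (cmp->in0->op == Op::kLoadClass) ? cmp->in0
                 : (cmp->in1->op == Op::kLoadClass) ? cmp->in1 : nullptr;
      if (load == nullptr) return nullptr;
      Node* get = (load == cmp->in0) ? cmp->in1 : cmp->in0;
      if (get->op != Op::kFieldGet) return nullptr;  // must be the klass field read
      return get->in0;                               // the receiver
    }
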
+
+void ReferenceTypePropagation::Run() {
+ worklist_.reserve(kDefaultWorklistSize);
+
+ // To properly propagate type info we need to visit in the dominator-based order.
+ // Reverse post order guarantees a node's dominators are visited first.
+ // We take advantage of this order in `VisitBasicBlock`.
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ VisitBasicBlock(block);
+ }
+
+ ProcessWorklist();
+ ValidateTypes();
+}
+
+void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
+ RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_);
+  // Handle Phis first as there might be instructions in the same block that depend on them.
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ VisitPhi(it.Current()->AsPhi());
+ }
+
+ // Handle instructions. Since RTP may add HBoundType instructions just after the
+  // last visited instruction, use the `HInstructionIteratorHandleChanges` iterator.
+ for (HInstructionIteratorHandleChanges it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ instr->Accept(&visitor);
+ }
+
+ // Add extra nodes to bound types.
+ BoundTypeForIfNotNull(block);
+ BoundTypeForIfInstanceOf(block);
+ BoundTypeForClassCheck(block->GetLastInstruction());
+}
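
The iterator swap matters because the visitor itself may insert an HBoundType right behind the instruction being visited; an iterator that pre-caches its next pointer would skip such a node. A sketch of the assumed difference over a plain singly linked list (not ART's exact iterator classes):

    struct Node { Node* next = nullptr; };

    // Caches `next` before the visitor runs: robust to removal of `cur`, but
    // a node inserted right after `cur` is silently skipped.
    struct CachingIter {
      Node* cur; Node* next;
      explicit CachingIter(Node* head) : cur(head), next(head ? head->next : nullptr) {}
      bool Done() const { return cur == nullptr; }
      void Advance() { cur = next; next = (cur != nullptr) ? cur->next : nullptr; }
    };

    // Re-reads `next` on every step, so freshly inserted successors are visited.
    struct HandleChangesIter {
      Node* cur;
      explicit HandleChangesIter(Node* head) : cur(head) {}
      bool Done() const { return cur == nullptr; }
      void Advance() { cur = cur->next; }
    };
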
+
void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
HIf* ifInstruction = block->GetLastInstruction()->AsIf();
if (ifInstruction == nullptr) {
@@ -254,40 +370,14 @@ void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
// We only need to bound the type if we have uses in the relevant block.
// So start with null and create the HBoundType lazily, only if it's needed.
- HBoundType* bound_type = nullptr;
HBasicBlock* notNullBlock = ifInput->IsNotEqual()
? ifInstruction->IfTrueSuccessor()
: ifInstruction->IfFalseSuccessor();
- const HUseList<HInstruction*>& uses = obj->GetUses();
- for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
- HInstruction* user = it->GetUser();
- size_t index = it->GetIndex();
- // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
- ++it;
- if (notNullBlock->Dominates(user->GetBlock())) {
- if (bound_type == nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- HInstruction* insert_point = notNullBlock->GetFirstInstruction();
- ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
- handle_cache_.GetObjectClassHandle(), /* is_exact */ false);
- if (ShouldCreateBoundType(insert_point, obj, object_rti, nullptr, notNullBlock)) {
- bound_type = new (graph_->GetArena()) HBoundType(obj);
- bound_type->SetUpperBound(object_rti, /* bound_can_be_null */ false);
- if (obj->GetReferenceTypeInfo().IsValid()) {
- bound_type->SetReferenceTypeInfo(obj->GetReferenceTypeInfo());
- }
- notNullBlock->InsertInstructionBefore(bound_type, insert_point);
- } else {
- // We already have a bound type on the position we would need to insert
- // the new one. The existing bound type should dominate all the users
- // (dchecked) so there's no need to continue.
- break;
- }
- }
- user->ReplaceInput(bound_type, index);
- }
- }
+ ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
+ handle_cache_.GetObjectClassHandle(), /* is_exact */ false);
+
+ BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti);
}
// Returns true if one of the patterns below has been recognized. If so, the
@@ -378,15 +468,10 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass();
ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
- {
- if (!class_rti.IsValid()) {
- // He have loaded an unresolved class. Don't bother bounding the type.
- return;
- }
+ if (!class_rti.IsValid()) {
+    // We have loaded an unresolved class. Don't bother bounding the type.
+ return;
}
- // We only need to bound the type if we have uses in the relevant block.
- // So start with null and create the HBoundType lazily, only if it's needed.
- HBoundType* bound_type = nullptr;
HInstruction* obj = instanceOf->InputAt(0);
if (obj->GetReferenceTypeInfo().IsExact() && !obj->IsPhi()) {
@@ -398,33 +483,14 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
// input.
return;
}
- DCHECK(!obj->IsLoadClass()) << "We should not replace HLoadClass instructions";
- const HUseList<HInstruction*>& uses = obj->GetUses();
- for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
- HInstruction* user = it->GetUser();
- size_t index = it->GetIndex();
- // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
- ++it;
- if (instanceOfTrueBlock->Dominates(user->GetBlock())) {
- if (bound_type == nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction();
- if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) {
- bound_type = new (graph_->GetArena()) HBoundType(obj);
- bool is_exact = class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes();
- bound_type->SetUpperBound(ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), is_exact),
- /* InstanceOf fails for null. */ false);
- instanceOfTrueBlock->InsertInstructionBefore(bound_type, insert_point);
- } else {
- // We already have a bound type on the position we would need to insert
- // the new one. The existing bound type should dominate all the users
- // (dchecked) so there's no need to continue.
- break;
- }
- }
- user->ReplaceInput(bound_type, index);
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ if (!class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
+ class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false);
}
}
+ BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction */ nullptr, class_rti);
}
void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
@@ -433,18 +499,19 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst
if (instr->IsInvokeStaticOrDirect() && instr->AsInvokeStaticOrDirect()->IsStringInit()) {
// Calls to String.<init> are replaced with a StringFactory.
if (kIsDebugBuild) {
- HInvoke* invoke = instr->AsInvoke();
+ HInvokeStaticOrDirect* invoke = instr->AsInvokeStaticOrDirect();
ClassLinker* cl = Runtime::Current()->GetClassLinker();
Thread* self = Thread::Current();
StackHandleScope<2> hs(self);
+ const DexFile& dex_file = *invoke->GetTargetMethod().dex_file;
Handle<mirror::DexCache> dex_cache(
- hs.NewHandle(FindDexCacheWithHint(self, invoke->GetDexFile(), hint_dex_cache_)));
+ hs.NewHandle(FindDexCacheWithHint(self, dex_file, hint_dex_cache_)));
// Use a null loader. We should probably use the compiling method's class loader,
// but then we would need to pass it to RTPVisitor just for this debug check. Since
// the method is from the String class, the null loader is good enough.
Handle<mirror::ClassLoader> loader;
ArtMethod* method = cl->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
- invoke->GetDexFile(), invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect);
+ dex_file, invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect);
DCHECK(method != nullptr);
mirror::Class* declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr);
@@ -464,6 +531,10 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst
}
}
+void ReferenceTypePropagation::RTPVisitor::VisitDeoptimize(HDeoptimize* instr) {
+ BoundTypeForClassCheck(instr);
+}
+
void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr,
dex::TypeIndex type_idx,
const DexFile& dex_file,
@@ -477,11 +548,13 @@ void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction*
}
void ReferenceTypePropagation::RTPVisitor::VisitNewInstance(HNewInstance* instr) {
- UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true);
+ ScopedObjectAccess soa(Thread::Current());
+ SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
}
void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) {
- UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true);
+ ScopedObjectAccess soa(Thread::Current());
+ SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
}
static mirror::Class* GetClassFromDexCache(Thread* self,
@@ -515,16 +588,9 @@ void ReferenceTypePropagation::RTPVisitor::UpdateFieldAccessTypeInfo(HInstructio
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::Class> klass;
- // The field index is unknown only during tests.
- if (info.GetFieldIndex() != kUnknownFieldIndex) {
- ClassLinker* cl = Runtime::Current()->GetClassLinker();
- ArtField* field = cl->GetResolvedField(info.GetFieldIndex(),
- MakeObjPtr(info.GetDexCache().Get()));
- // TODO: There are certain cases where we can't resolve the field.
- // b/21914925 is open to keep track of a repro case for this issue.
- if (field != nullptr) {
- klass = field->GetType<false>();
- }
+ // The field is unknown only during tests.
+ if (info.GetField() != nullptr) {
+ klass = info.GetField()->GetType<false>();
}
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
@@ -556,14 +622,10 @@ void ReferenceTypePropagation::RTPVisitor::VisitUnresolvedStaticFieldGet(
void ReferenceTypePropagation::RTPVisitor::VisitLoadClass(HLoadClass* instr) {
ScopedObjectAccess soa(Thread::Current());
- // Get type from dex cache assuming it was populated by the verifier.
- mirror::Class* resolved_class = GetClassFromDexCache(soa.Self(),
- instr->GetDexFile(),
- instr->GetTypeIndex(),
- hint_dex_cache_);
- if (IsAdmissible(resolved_class)) {
+ Handle<mirror::Class> resolved_class = instr->GetClass();
+ if (IsAdmissible(resolved_class.Get())) {
instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(
- handle_cache_->NewHandle(resolved_class), /* is_exact */ true));
+ resolved_class, /* is_exact */ true));
}
instr->SetReferenceTypeInfo(
ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact */ true));
@@ -780,12 +842,8 @@ void ReferenceTypePropagation::RTPVisitor::VisitInvoke(HInvoke* instr) {
}
ScopedObjectAccess soa(Thread::Current());
- ClassLinker* cl = Runtime::Current()->GetClassLinker();
- mirror::DexCache* dex_cache =
- FindDexCacheWithHint(soa.Self(), instr->GetDexFile(), hint_dex_cache_);
- PointerSize pointer_size = cl->GetImagePointerSize();
- ArtMethod* method = dex_cache->GetResolvedMethod(instr->GetDexMethodIndex(), pointer_size);
- mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(false, pointer_size);
+ ArtMethod* method = instr->GetResolvedMethod();
+ mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(/* resolve */ false);
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
}
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 559f40923b..2227872f76 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -492,7 +492,6 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
HInstruction** input2) {
HGraph* graph = CreateGraph(allocator);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
- ScopedNullHandle<mirror::DexCache> dex_cache;
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
HInstruction* parameter = new (allocator) HParameterValue(
@@ -504,13 +503,13 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
entry->AddSuccessor(block);
HInstruction* test = new (allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimBoolean,
MemberOffset(22),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0);
block->AddInstruction(test);
block->AddInstruction(new (allocator) HIf(test));
@@ -531,22 +530,22 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
*phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt);
join->AddPhi(*phi);
*input1 = new (allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimInt,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0);
*input2 = new (allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimInt,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0);
then->AddInstruction(*input1);
else_->AddInstruction(*input2);
@@ -654,7 +653,6 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
HInstruction** field,
HInstruction** ret) {
HGraph* graph = CreateGraph(allocator);
- ScopedNullHandle<mirror::DexCache> dex_cache;
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -667,13 +665,13 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
entry->AddSuccessor(block);
*field = new (allocator) HInstanceFieldGet(parameter,
+ nullptr,
Primitive::kPrimInt,
MemberOffset(42),
false,
kUnknownFieldIndex,
kUnknownClassDefIndex,
graph->GetDexFile(),
- dex_cache,
0);
block->AddInstruction(*field);
*ret = new (allocator) HReturn(*field);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index ca26c30dcf..c5294107ae 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -133,72 +133,65 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- const DexFile& dex_file = load_class->GetDexFile();
- dex::TypeIndex type_index = load_class->GetTypeIndex();
- Handle<mirror::DexCache> dex_cache = IsSameDexFile(dex_file, *compilation_unit_.GetDexFile())
- ? compilation_unit_.GetDexCache()
- : hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
- mirror::Class* cls = dex_cache->GetResolvedType(type_index);
- SharpenClass(load_class, cls, handles_, codegen_, compiler_driver_);
+ SharpenClass(load_class, codegen_, compiler_driver_);
}
void HSharpening::SharpenClass(HLoadClass* load_class,
- mirror::Class* klass,
- VariableSizedHandleScope* handles,
CodeGenerator* codegen,
CompilerDriver* compiler_driver) {
- ScopedAssertNoThreadSuspension sants("Sharpening class in compiler");
+ Handle<mirror::Class> klass = load_class->GetClass();
DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
<< load_class->GetLoadKind();
- DCHECK(!load_class->IsInDexCache()) << "HLoadClass should not be optimized before sharpening.";
DCHECK(!load_class->IsInBootImage()) << "HLoadClass should not be optimized before sharpening.";
+ if (load_class->NeedsAccessCheck()) {
+ // We need to call the runtime anyway, so we simply get the class as that call's return value.
+ return;
+ }
+
+ if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
+ // Loading from the ArtMethod* is the most efficient retrieval in code size.
+ // TODO: This may not actually be true for all architectures and
+ // locations of target classes. The additional register pressure
+ // for using the ArtMethod* should be considered.
+ return;
+ }
+
const DexFile& dex_file = load_class->GetDexFile();
dex::TypeIndex type_index = load_class->GetTypeIndex();
- bool is_in_dex_cache = false;
bool is_in_boot_image = false;
HLoadClass::LoadKind desired_load_kind = static_cast<HLoadClass::LoadKind>(-1);
- uint64_t address = 0u; // Class or dex cache element address.
Runtime* runtime = Runtime::Current();
if (codegen->GetCompilerOptions().IsBootImage()) {
// Compiling boot image. Check if the class is a boot image class.
DCHECK(!runtime->UseJitCompilation());
if (!compiler_driver->GetSupportBootImageFixup()) {
- // MIPS64 or compiler_driver_test. Do not sharpen.
+ // compiler_driver_test. Do not sharpen.
desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
- } else if ((klass != nullptr) && compiler_driver->IsImageClass(
+ } else if ((klass.Get() != nullptr) && compiler_driver->IsImageClass(
dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
is_in_boot_image = true;
- is_in_dex_cache = true;
desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
: HLoadClass::LoadKind::kBootImageLinkTimeAddress;
} else {
- // Not a boot image class. We must go through the dex cache.
+ // Not a boot image class.
DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
- desired_load_kind = HLoadClass::LoadKind::kDexCachePcRelative;
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
} else {
- is_in_boot_image = (klass != nullptr) && runtime->GetHeap()->ObjectIsInBootImageSpace(klass);
+ is_in_boot_image = (klass.Get() != nullptr) &&
+ runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- is_in_dex_cache = (klass != nullptr);
if (is_in_boot_image) {
// TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(klass);
- } else if (is_in_dex_cache) {
+ } else if (klass.Get() != nullptr) {
desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
- // We store in the address field the location of the stack reference maintained
- // by the handle. We do this now so that the code generation does not need to figure
- // out which class loader to use.
- address = reinterpret_cast<uint64_t>(handles->NewHandle(klass).GetReference());
} else {
// Class not loaded yet. This happens when the dex code requesting
// this `HLoadClass` hasn't been executed in the interpreter.
@@ -209,15 +202,9 @@ void HSharpening::SharpenClass(HLoadClass* load_class,
} else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
// AOT app compilation. Check if the class is in the boot image.
desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(klass);
} else {
// Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
- // Use PC-relative load from the dex cache if the dex file belongs
- // to the oat file that we're currently compiling.
- desired_load_kind =
- ContainsElement(compiler_driver->GetDexFilesForOatFile(), &load_class->GetDexFile())
- ? HLoadClass::LoadKind::kDexCachePcRelative
- : HLoadClass::LoadKind::kDexCacheViaMethod;
+ desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
}
DCHECK_NE(desired_load_kind, static_cast<HLoadClass::LoadKind>(-1));
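
With the access-check and referrer-class early-outs hoisted to the top, the remaining choice reduces to a small decision tree. The helper below is a hedged restatement for orientation, not ART's code; the flags are simplifications of the checks above, and the JIT fallback for a class not loaded yet is inferred, since that branch is cut off at the hunk boundary:

    enum class LoadKind { kReferrersClass, kBootImageLinkTimeAddress,
                          kBootImageLinkTimePcRelative, kBootImageAddress,
                          kBssEntry, kJitTableAddress, kDexCacheViaMethod };

    LoadKind ChooseClassLoadKind(bool compiling_boot_image, bool supports_fixup,
                                 bool is_image_class, bool use_jit,
                                 bool class_resolved, bool in_boot_image, bool pic) {
      if (compiling_boot_image) {
        if (!supports_fixup) return LoadKind::kDexCacheViaMethod;  // test-only path
        if (is_image_class) {
          return pic ? LoadKind::kBootImageLinkTimePcRelative
                     : LoadKind::kBootImageLinkTimeAddress;
        }
        return LoadKind::kBssEntry;
      }
      if (use_jit) {
        if (in_boot_image) return LoadKind::kBootImageAddress;
        if (class_resolved) return LoadKind::kJitTableAddress;
        return LoadKind::kDexCacheViaMethod;  // assumed fallback for unresolved classes
      }
      if (in_boot_image && !pic) return LoadKind::kBootImageAddress;
      return LoadKind::kBssEntry;
    }
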
@@ -226,42 +213,18 @@ void HSharpening::SharpenClass(HLoadClass* load_class,
load_class->MarkInBootImage();
}
- if (load_class->NeedsAccessCheck()) {
- // We need to call the runtime anyway, so we simply get the class as that call's return value.
- return;
- }
-
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
- // Loading from the ArtMethod* is the most efficient retrieval in code size.
- // TODO: This may not actually be true for all architectures and
- // locations of target classes. The additional register pressure
- // for using the ArtMethod* should be considered.
- return;
- }
-
- if (is_in_dex_cache) {
- load_class->MarkInDexCache();
- }
-
HLoadClass::LoadKind load_kind = codegen->GetSupportedLoadClassKind(desired_load_kind);
switch (load_kind) {
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
+ case HLoadClass::LoadKind::kBssEntry:
case HLoadClass::LoadKind::kDexCacheViaMethod:
load_class->SetLoadKindWithTypeReference(load_kind, dex_file, type_index);
break;
case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
- DCHECK_NE(address, 0u);
- load_class->SetLoadKindWithAddress(load_kind, address);
- break;
- case HLoadClass::LoadKind::kDexCachePcRelative: {
- PointerSize pointer_size = InstructionSetPointerSize(codegen->GetInstructionSet());
- DexCacheArraysLayout layout(pointer_size, &dex_file);
- size_t element_index = layout.TypeOffset(type_index);
- load_class->SetLoadKindWithDexCacheReference(load_kind, dex_file, element_index);
+ load_class->SetLoadKind(load_kind);
break;
- }
default:
LOG(FATAL) << "Unexpected load kind: " << load_kind;
UNREACHABLE();
@@ -274,8 +237,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
const DexFile& dex_file = load_string->GetDexFile();
dex::StringIndex string_index = load_string->GetStringIndex();
- HLoadString::LoadKind desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
- uint64_t address = 0u; // String or dex cache element address.
+ HLoadString::LoadKind desired_load_kind = static_cast<HLoadString::LoadKind>(-1);
{
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
@@ -284,12 +246,13 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
Handle<mirror::DexCache> dex_cache = IsSameDexFile(dex_file, *compilation_unit_.GetDexFile())
? compilation_unit_.GetDexCache()
: hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
+ mirror::String* string = nullptr;
if (codegen_->GetCompilerOptions().IsBootImage()) {
// Compiling boot image. Resolve the string and allocate it if needed, to ensure
// the string will be added to the boot image.
DCHECK(!runtime->UseJitCompilation());
- mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
+ string = class_linker->ResolveString(dex_file, string_index, dex_cache);
CHECK(string != nullptr);
if (compiler_driver_->GetSupportBootImageFixup()) {
DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file));
@@ -297,49 +260,41 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) {
? HLoadString::LoadKind::kBootImageLinkTimePcRelative
: HLoadString::LoadKind::kBootImageLinkTimeAddress;
} else {
- // MIPS64 or compiler_driver_test. Do not sharpen.
- DCHECK_EQ(desired_load_kind, HLoadString::LoadKind::kDexCacheViaMethod);
+ // compiler_driver_test. Do not sharpen.
+ desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
}
} else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- mirror::String* string = class_linker->LookupString(dex_file, string_index, dex_cache);
+ string = class_linker->LookupString(dex_file, string_index, dex_cache);
if (string != nullptr) {
if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(string);
} else {
desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
}
+ } else {
+ desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod;
}
} else {
// AOT app compilation. Try to lookup the string without allocating if not found.
- mirror::String* string = class_linker->LookupString(dex_file, string_index, dex_cache);
+ string = class_linker->LookupString(dex_file, string_index, dex_cache);
if (string != nullptr &&
runtime->GetHeap()->ObjectIsInBootImageSpace(string) &&
!codegen_->GetCompilerOptions().GetCompilePic()) {
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(string);
} else {
desired_load_kind = HLoadString::LoadKind::kBssEntry;
}
}
+ if (string != nullptr) {
+ load_string->SetString(handles_->NewHandle(string));
+ }
}
+ DCHECK_NE(desired_load_kind, static_cast<HLoadString::LoadKind>(-1));
HLoadString::LoadKind load_kind = codegen_->GetSupportedLoadStringKind(desired_load_kind);
- switch (load_kind) {
- case HLoadString::LoadKind::kBootImageLinkTimeAddress:
- case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadString::LoadKind::kBssEntry:
- case HLoadString::LoadKind::kDexCacheViaMethod:
- case HLoadString::LoadKind::kJitTableAddress:
- load_string->SetLoadKindWithStringReference(load_kind, dex_file, string_index);
- break;
- case HLoadString::LoadKind::kBootImageAddress:
- DCHECK_NE(address, 0u);
- load_string->SetLoadKindWithAddress(load_kind, address);
- break;
- }
+ load_string->SetLoadKind(load_kind);
}
} // namespace art
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index ae5ccb33ab..ae3d83ef2c 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -49,8 +49,6 @@ class HSharpening : public HOptimization {
// Used internally but also by the inliner.
static void SharpenClass(HLoadClass* load_class,
- mirror::Class* klass,
- VariableSizedHandleScope* handles,
CodeGenerator* codegen,
CompilerDriver* compiler_driver)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index fc8af6462a..a9a1e6f592 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -13,8 +13,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
#include "stack_map_stream.h"
+#include "art_method.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+
namespace art {
void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
@@ -26,7 +31,7 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
DCHECK_NE(dex_pc, static_cast<uint32_t>(-1)) << "invalid dex_pc";
current_entry_.dex_pc = dex_pc;
- current_entry_.native_pc_offset = native_pc_offset;
+ current_entry_.native_pc_code_offset = CodeOffset::FromOffset(native_pc_offset, instruction_set_);
current_entry_.register_mask = register_mask;
current_entry_.sp_mask = sp_mask;
current_entry_.num_dex_registers = num_dex_registers;
@@ -98,15 +103,27 @@ void StackMapStream::AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t
current_dex_register_++;
}
-void StackMapStream::BeginInlineInfoEntry(uint32_t method_index,
+static bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
+ // Note: the runtime is null only for unit testing.
+ return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
+}
+
+void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
uint32_t dex_pc,
- InvokeType invoke_type,
- uint32_t num_dex_registers) {
+ uint32_t num_dex_registers,
+ const DexFile* outer_dex_file) {
DCHECK(!in_inline_frame_);
in_inline_frame_ = true;
- current_inline_info_.method_index = method_index;
+ if (EncodeArtMethodInInlineInfo(method)) {
+ current_inline_info_.method = method;
+ } else {
+ if (dex_pc != static_cast<uint32_t>(-1) && kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(IsSameDexFile(*outer_dex_file, *method->GetDexFile()));
+ }
+ current_inline_info_.method_index = method->GetDexMethodIndexUnchecked();
+ }
current_inline_info_.dex_pc = dex_pc;
- current_inline_info_.invoke_type = invoke_type;
current_inline_info_.num_dex_registers = num_dex_registers;
current_inline_info_.dex_register_locations_start_index = dex_register_locations_.size();
if (num_dex_registers != 0) {
@@ -127,10 +144,10 @@ void StackMapStream::EndInlineInfoEntry() {
current_inline_info_ = InlineInfoEntry();
}
-uint32_t StackMapStream::ComputeMaxNativePcOffset() const {
- uint32_t max_native_pc_offset = 0u;
+CodeOffset StackMapStream::ComputeMaxNativePcCodeOffset() const {
+ CodeOffset max_native_pc_offset;
for (const StackMapEntry& entry : stack_maps_) {
- max_native_pc_offset = std::max(max_native_pc_offset, entry.native_pc_offset);
+ max_native_pc_offset = std::max(max_native_pc_offset, entry.native_pc_code_offset);
}
return max_native_pc_offset;
}
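
The new CodeOffset type compresses native PC offsets by the ISA's instruction alignment before bit widths are computed in PrepareForFillIn, which is presumably also why TestNoDexRegisterMap's second offset moves from 67 to 68 later in this change (67 is not 2-byte aligned, as Thumb2 requires). A sketch of the assumed scheme, consistent with the FromOffset/Uint32Value pairing exercised by the new CodeOffsetTest:

    #include <cassert>
    #include <cstdint>

    struct CodeOffsetSketch {
      uint32_t compressed;
      static CodeOffsetSketch FromOffset(uint32_t offset, uint32_t alignment) {
        assert(offset % alignment == 0);  // e.g. 68 is valid under Thumb2's 2-byte alignment
        return {offset / alignment};      // fewer significant bits to encode
      }
      uint32_t Uint32Value(uint32_t alignment) const { return compressed * alignment; }
    };
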
@@ -140,8 +157,9 @@ size_t StackMapStream::PrepareForFillIn() {
dex_register_maps_size_ = ComputeDexRegisterMapsSize();
ComputeInlineInfoEncoding(); // needs dex_register_maps_size_.
inline_info_size_ = inline_infos_.size() * inline_info_encoding_.GetEntrySize();
- uint32_t max_native_pc_offset = ComputeMaxNativePcOffset();
- size_t stack_map_size = stack_map_encoding_.SetFromSizes(max_native_pc_offset,
+ CodeOffset max_native_pc_offset = ComputeMaxNativePcCodeOffset();
+ // The stack map contains compressed native offsets.
+ size_t stack_map_size = stack_map_encoding_.SetFromSizes(max_native_pc_offset.CompressedValue(),
dex_pc_max_,
dex_register_maps_size_,
inline_info_size_,
@@ -229,25 +247,32 @@ size_t StackMapStream::ComputeDexRegisterMapsSize() const {
void StackMapStream::ComputeInlineInfoEncoding() {
uint32_t method_index_max = 0;
uint32_t dex_pc_max = DexFile::kDexNoIndex;
- uint32_t invoke_type_max = 0;
+ uint32_t extra_data_max = 0;
uint32_t inline_info_index = 0;
for (const StackMapEntry& entry : stack_maps_) {
for (size_t j = 0; j < entry.inlining_depth; ++j) {
InlineInfoEntry inline_entry = inline_infos_[inline_info_index++];
- method_index_max = std::max(method_index_max, inline_entry.method_index);
+ if (inline_entry.method == nullptr) {
+ method_index_max = std::max(method_index_max, inline_entry.method_index);
+ extra_data_max = std::max(extra_data_max, 1u);
+ } else {
+ method_index_max = std::max(
+ method_index_max, High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
+ extra_data_max = std::max(
+ extra_data_max, Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
+ }
if (inline_entry.dex_pc != DexFile::kDexNoIndex &&
(dex_pc_max == DexFile::kDexNoIndex || dex_pc_max < inline_entry.dex_pc)) {
dex_pc_max = inline_entry.dex_pc;
}
- invoke_type_max = std::max(invoke_type_max, static_cast<uint32_t>(inline_entry.invoke_type));
}
}
DCHECK_EQ(inline_info_index, inline_infos_.size());
inline_info_encoding_.SetFromSizes(method_index_max,
dex_pc_max,
- invoke_type_max,
+ extra_data_max,
dex_register_maps_size_);
}
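
The extra_data column that replaces the invoke type also acts as a tag. When an ArtMethod* is stored, its 32-bit halves land in (method_index, extra_data) via High32Bits/Low32Bits, and an aligned pointer's low bit is 0; the method-index case writes extra_data = 1 instead, which is plausibly how EncodesArtMethodAtDepth distinguishes the two cases in the checks further down. A hedged sketch of that packing:

    #include <cstdint>

    struct PackedInlineEntry { uint32_t method_index; uint32_t extra_data; };

    PackedInlineEntry PackMethodPointer(void* method) {
      uint64_t bits = reinterpret_cast<uintptr_t>(method);
      return {static_cast<uint32_t>(bits >> 32),   // High32Bits analogue
              static_cast<uint32_t>(bits)};        // Low32Bits; low bit is 0 when aligned
    }

    PackedInlineEntry PackMethodIndex(uint32_t index) {
      return {index, 1u};  // odd extra_data tags the method-index form
    }

    bool EncodesMethodPointer(const PackedInlineEntry& e) {
      return (e.extra_data & 1u) == 0u;
    }
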
@@ -295,7 +320,7 @@ void StackMapStream::FillIn(MemoryRegion region) {
StackMapEntry entry = stack_maps_[i];
stack_map.SetDexPc(stack_map_encoding_, entry.dex_pc);
- stack_map.SetNativePcOffset(stack_map_encoding_, entry.native_pc_offset);
+ stack_map.SetNativePcCodeOffset(stack_map_encoding_, entry.native_pc_code_offset);
stack_map.SetRegisterMask(stack_map_encoding_, entry.register_mask);
size_t number_of_stack_mask_bits = stack_map.GetNumberOfStackMaskBits(stack_map_encoding_);
if (entry.sp_mask != nullptr) {
@@ -354,9 +379,20 @@ void StackMapStream::FillIn(MemoryRegion region) {
DCHECK_LE(entry.inline_infos_start_index + entry.inlining_depth, inline_infos_.size());
for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index];
- inline_info.SetMethodIndexAtDepth(inline_info_encoding_, depth, inline_entry.method_index);
+ if (inline_entry.method != nullptr) {
+ inline_info.SetMethodIndexAtDepth(
+ inline_info_encoding_,
+ depth,
+ High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
+ inline_info.SetExtraDataAtDepth(
+ inline_info_encoding_,
+ depth,
+ Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
+ } else {
+ inline_info.SetMethodIndexAtDepth(inline_info_encoding_, depth, inline_entry.method_index);
+ inline_info.SetExtraDataAtDepth(inline_info_encoding_, depth, 1);
+ }
inline_info.SetDexPcAtDepth(inline_info_encoding_, depth, inline_entry.dex_pc);
- inline_info.SetInvokeTypeAtDepth(inline_info_encoding_, depth, inline_entry.invoke_type);
if (inline_entry.num_dex_registers == 0) {
// No dex map available.
inline_info.SetDexRegisterMapOffsetAtDepth(inline_info_encoding_,
@@ -511,7 +547,8 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
StackMapEntry entry = stack_maps_[s];
// Check main stack map fields.
- DCHECK_EQ(stack_map.GetNativePcOffset(stack_map_encoding), entry.native_pc_offset);
+ DCHECK_EQ(stack_map.GetNativePcOffset(stack_map_encoding, instruction_set_),
+ entry.native_pc_code_offset.Uint32Value(instruction_set_));
DCHECK_EQ(stack_map.GetDexPc(stack_map_encoding), entry.dex_pc);
DCHECK_EQ(stack_map.GetRegisterMask(stack_map_encoding), entry.register_mask);
size_t num_stack_mask_bits = stack_map.GetNumberOfStackMaskBits(stack_map_encoding);
@@ -544,10 +581,13 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
InlineInfoEntry inline_entry = inline_infos_[inline_info_index];
DCHECK_EQ(inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, d),
inline_entry.dex_pc);
- DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, d),
- inline_entry.method_index);
- DCHECK_EQ(inline_info.GetInvokeTypeAtDepth(encoding.inline_info_encoding, d),
- inline_entry.invoke_type);
+ if (inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, d)) {
+ DCHECK_EQ(inline_info.GetArtMethodAtDepth(encoding.inline_info_encoding, d),
+ inline_entry.method);
+ } else {
+ DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, d),
+ inline_entry.method_index);
+ }
CheckDexRegisterMap(code_info,
code_info.GetDexRegisterMapAtDepth(
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 53a9795d52..8fec472437 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -59,8 +59,10 @@ class DexRegisterLocationHashFn {
*/
class StackMapStream : public ValueObject {
public:
- explicit StackMapStream(ArenaAllocator* allocator)
+ explicit StackMapStream(ArenaAllocator* allocator,
+ InstructionSet instruction_set)
: allocator_(allocator),
+ instruction_set_(instruction_set),
stack_maps_(allocator->Adapter(kArenaAllocStackMapStream)),
location_catalog_entries_(allocator->Adapter(kArenaAllocStackMapStream)),
location_catalog_entries_indices_(allocator->Adapter(kArenaAllocStackMapStream)),
@@ -95,7 +97,7 @@ class StackMapStream : public ValueObject {
// See runtime/stack_map.h to know what these fields contain.
struct StackMapEntry {
uint32_t dex_pc;
- uint32_t native_pc_offset;
+ CodeOffset native_pc_code_offset;
uint32_t register_mask;
BitVector* sp_mask;
uint32_t num_dex_registers;
@@ -109,8 +111,8 @@ class StackMapStream : public ValueObject {
struct InlineInfoEntry {
uint32_t dex_pc; // DexFile::kDexNoIndex for intrinsified native methods.
+ ArtMethod* method;
uint32_t method_index;
- InvokeType invoke_type;
uint32_t num_dex_registers;
BitVector* live_dex_registers_mask;
size_t dex_register_locations_start_index;
@@ -126,10 +128,10 @@ class StackMapStream : public ValueObject {
void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value);
- void BeginInlineInfoEntry(uint32_t method_index,
+ void BeginInlineInfoEntry(ArtMethod* method,
uint32_t dex_pc,
- InvokeType invoke_type,
- uint32_t num_dex_registers);
+ uint32_t num_dex_registers,
+ const DexFile* outer_dex_file = nullptr);
void EndInlineInfoEntry();
size_t GetNumberOfStackMaps() const {
@@ -141,11 +143,9 @@ class StackMapStream : public ValueObject {
}
void SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) {
- stack_maps_[i].native_pc_offset = native_pc_offset;
+ stack_maps_[i].native_pc_code_offset = CodeOffset::FromOffset(native_pc_offset, instruction_set_);
}
- uint32_t ComputeMaxNativePcOffset() const;
-
// Prepares the stream to fill in a memory region. Must be called before FillIn.
// Returns the size (in bytes) needed to store this stream.
size_t PrepareForFillIn();
@@ -158,6 +158,8 @@ class StackMapStream : public ValueObject {
size_t ComputeDexRegisterMapsSize() const;
void ComputeInlineInfoEncoding();
+ CodeOffset ComputeMaxNativePcCodeOffset() const;
+
// Returns the index of an entry with the same dex register map as the current_entry,
// or kNoSameDexMapFound if no such entry exists.
size_t FindEntryWithTheSameDexMap();
@@ -175,6 +177,7 @@ class StackMapStream : public ValueObject {
void CheckCodeInfo(MemoryRegion region) const;
ArenaAllocator* allocator_;
+ const InstructionSet instruction_set_;
ArenaVector<StackMapEntry> stack_maps_;
// A catalog of unique [location_kind, register_value] pairs (per method).
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 967fd96561..f68695bcbc 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -16,6 +16,7 @@
#include "stack_map.h"
+#include "art_method.h"
#include "base/arena_bit_vector.h"
#include "stack_map_stream.h"
@@ -46,7 +47,7 @@ using Kind = DexRegisterLocation::Kind;
TEST(StackMapTest, Test1) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
ArenaBitVector sp_mask(&arena, 0, false);
size_t number_of_dex_registers = 2;
@@ -77,7 +78,7 @@ TEST(StackMapTest, Test1) {
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask));
@@ -127,7 +128,8 @@ TEST(StackMapTest, Test1) {
TEST(StackMapTest, Test2) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
+ ArtMethod art_method;
ArenaBitVector sp_mask1(&arena, 0, true);
sp_mask1.SetBit(2);
@@ -137,9 +139,9 @@ TEST(StackMapTest, Test2) {
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
- stream.BeginInlineInfoEntry(82, 3, kDirect, number_of_dex_registers_in_inline_info);
+ stream.BeginInlineInfoEntry(&art_method, 3, number_of_dex_registers_in_inline_info);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(42, 2, kStatic, number_of_dex_registers_in_inline_info);
+ stream.BeginInlineInfoEntry(&art_method, 2, number_of_dex_registers_in_inline_info);
stream.EndInlineInfoEntry();
stream.EndStackMapEntry();
@@ -191,7 +193,7 @@ TEST(StackMapTest, Test2) {
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask1));
@@ -238,12 +240,10 @@ TEST(StackMapTest, Test2) {
ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
ASSERT_EQ(2u, inline_info.GetDepth(encoding.inline_info_encoding));
- ASSERT_EQ(82u, inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(42u, inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, 1));
ASSERT_EQ(3u, inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
ASSERT_EQ(2u, inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(kDirect, inline_info.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(kStatic, inline_info.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
}
// Second stack map.
@@ -252,7 +252,7 @@ TEST(StackMapTest, Test2) {
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u, encoding)));
ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0xFFu, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask2));
@@ -306,7 +306,7 @@ TEST(StackMapTest, Test2) {
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(2u, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u, encoding)));
ASSERT_EQ(2u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0xABu, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask3));
@@ -360,7 +360,7 @@ TEST(StackMapTest, Test2) {
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(3u, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u, encoding)));
ASSERT_EQ(3u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0xCDu, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask4));
@@ -412,7 +412,7 @@ TEST(StackMapTest, Test2) {
TEST(StackMapTest, TestNonLiveDexRegisters) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 2;
@@ -442,7 +442,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
@@ -491,7 +491,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 1024;
@@ -554,7 +554,7 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
TEST(StackMapTest, TestShareDexRegisterMap) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 2;
@@ -612,7 +612,7 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
TEST(StackMapTest, TestNoDexRegisterMap) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 0;
@@ -620,7 +620,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
stream.EndStackMapEntry();
number_of_dex_registers = 1;
- stream.BeginStackMapEntry(1, 67, 0x4, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(1, 68, 0x4, &sp_mask, number_of_dex_registers, 0);
stream.EndStackMapEntry();
size_t size = stream.PrepareForFillIn();
@@ -641,7 +641,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
@@ -649,9 +649,9 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
stack_map = code_info.GetStackMapAt(1, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1, encoding)));
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(67, encoding)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(68, encoding)));
ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding));
- ASSERT_EQ(67u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(68u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
ASSERT_EQ(0x4u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
@@ -661,7 +661,8 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
TEST(StackMapTest, InlineTest) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- StackMapStream stream(&arena);
+ StackMapStream stream(&arena, kRuntimeISA);
+ ArtMethod art_method;
ArenaBitVector sp_mask1(&arena, 0, true);
sp_mask1.SetBit(2);
@@ -672,10 +673,10 @@ TEST(StackMapTest, InlineTest) {
stream.AddDexRegisterEntry(Kind::kInStack, 0);
stream.AddDexRegisterEntry(Kind::kConstant, 4);
- stream.BeginInlineInfoEntry(42, 2, kStatic, 1);
+ stream.BeginInlineInfoEntry(&art_method, 2, 1);
stream.AddDexRegisterEntry(Kind::kInStack, 8);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(82, 3, kStatic, 3);
+ stream.BeginInlineInfoEntry(&art_method, 3, 3);
stream.AddDexRegisterEntry(Kind::kInStack, 16);
stream.AddDexRegisterEntry(Kind::kConstant, 20);
stream.AddDexRegisterEntry(Kind::kInRegister, 15);
@@ -688,15 +689,15 @@ TEST(StackMapTest, InlineTest) {
stream.AddDexRegisterEntry(Kind::kInStack, 56);
stream.AddDexRegisterEntry(Kind::kConstant, 0);
- stream.BeginInlineInfoEntry(42, 2, kDirect, 1);
+ stream.BeginInlineInfoEntry(&art_method, 2, 1);
stream.AddDexRegisterEntry(Kind::kInStack, 12);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(82, 3, kStatic, 3);
+ stream.BeginInlineInfoEntry(&art_method, 3, 3);
stream.AddDexRegisterEntry(Kind::kInStack, 80);
stream.AddDexRegisterEntry(Kind::kConstant, 10);
stream.AddDexRegisterEntry(Kind::kInRegister, 5);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(52, 5, kVirtual, 0);
+ stream.BeginInlineInfoEntry(&art_method, 5, 0);
stream.EndInlineInfoEntry();
stream.EndStackMapEntry();
@@ -712,12 +713,12 @@ TEST(StackMapTest, InlineTest) {
stream.AddDexRegisterEntry(Kind::kInStack, 56);
stream.AddDexRegisterEntry(Kind::kConstant, 0);
- stream.BeginInlineInfoEntry(42, 2, kVirtual, 0);
+ stream.BeginInlineInfoEntry(&art_method, 2, 0);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(52, 5, kInterface, 1);
+ stream.BeginInlineInfoEntry(&art_method, 5, 1);
stream.AddDexRegisterEntry(Kind::kInRegister, 2);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(52, 10, kStatic, 2);
+ stream.BeginInlineInfoEntry(&art_method, 10, 2);
stream.AddDexRegisterEntry(Kind::kNone, 0);
stream.AddDexRegisterEntry(Kind::kInRegister, 3);
stream.EndInlineInfoEntry();
@@ -743,11 +744,9 @@ TEST(StackMapTest, InlineTest) {
InlineInfo if0 = ci.GetInlineInfoOf(sm0, encoding);
ASSERT_EQ(2u, if0.GetDepth(encoding.inline_info_encoding));
ASSERT_EQ(2u, if0.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(42u, if0.GetMethodIndexAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(kStatic, if0.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
ASSERT_EQ(3u, if0.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(82u, if0.GetMethodIndexAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(kStatic, if0.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, encoding, 1);
ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
@@ -769,14 +768,11 @@ TEST(StackMapTest, InlineTest) {
InlineInfo if1 = ci.GetInlineInfoOf(sm1, encoding);
ASSERT_EQ(3u, if1.GetDepth(encoding.inline_info_encoding));
ASSERT_EQ(2u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(42u, if1.GetMethodIndexAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(kDirect, if1.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
ASSERT_EQ(3u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(82u, if1.GetMethodIndexAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(kStatic, if1.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
ASSERT_EQ(5u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_EQ(52u, if1.GetMethodIndexAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_EQ(kVirtual, if1.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 2));
+ ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 2));
DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, encoding, 1);
ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
@@ -810,14 +806,11 @@ TEST(StackMapTest, InlineTest) {
InlineInfo if2 = ci.GetInlineInfoOf(sm3, encoding);
ASSERT_EQ(3u, if2.GetDepth(encoding.inline_info_encoding));
ASSERT_EQ(2u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(42u, if2.GetMethodIndexAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(kVirtual, if2.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
ASSERT_EQ(5u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(52u, if2.GetMethodIndexAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(kInterface, if2.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
ASSERT_EQ(10u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_EQ(52u, if2.GetMethodIndexAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_EQ(kStatic, if2.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 2));
+ ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 2));
ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(encoding.inline_info_encoding, 0));
@@ -830,4 +823,20 @@ TEST(StackMapTest, InlineTest) {
}
}
+TEST(StackMapTest, CodeOffsetTest) {
+ // Check that each ISA's minimum instruction alignment survives a CodeOffset encode/decode round trip.
+ CodeOffset offset_thumb2 = CodeOffset::FromOffset(kThumb2InstructionAlignment, kThumb2);
+ CodeOffset offset_arm64 = CodeOffset::FromOffset(kArm64InstructionAlignment, kArm64);
+ CodeOffset offset_x86 = CodeOffset::FromOffset(kX86InstructionAlignment, kX86);
+ CodeOffset offset_x86_64 = CodeOffset::FromOffset(kX86_64InstructionAlignment, kX86_64);
+ CodeOffset offset_mips = CodeOffset::FromOffset(kMipsInstructionAlignment, kMips);
+ CodeOffset offset_mips64 = CodeOffset::FromOffset(kMips64InstructionAlignment, kMips64);
+ EXPECT_EQ(offset_thumb2.Uint32Value(kThumb2), kThumb2InstructionAlignment);
+ EXPECT_EQ(offset_arm64.Uint32Value(kArm64), kArm64InstructionAlignment);
+ EXPECT_EQ(offset_x86.Uint32Value(kX86), kX86InstructionAlignment);
+ EXPECT_EQ(offset_x86_64.Uint32Value(kX86_64), kX86_64InstructionAlignment);
+ EXPECT_EQ(offset_mips.Uint32Value(kMips), kMipsInstructionAlignment);
+ EXPECT_EQ(offset_mips64.Uint32Value(kMips64), kMips64InstructionAlignment);
+}
+
} // namespace art
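The new CodeOffsetTest exercises one round trip per supported ISA: an offset equal to the ISA's minimum instruction alignment is encoded with CodeOffset::FromOffset() and read back with Uint32Value(). A minimal usage sketch for a single ISA, assuming (as the alignment-based test values suggest) that the offset is stored divided by the instruction alignment:

  // Round trip a Thumb2 native PC offset; 68 is even, so it survives the
  // assumed divide-by-alignment encoding (Thumb2 alignment is 2 bytes).
  CodeOffset offset = CodeOffset::FromOffset(68u, kThumb2);
  CHECK_EQ(68u, offset.Uint32Value(kThumb2));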