Pass a full MethodReference of the invoke in HInvoke nodes.
Cleanup to ensure we don't make mistakes when passing a dex method index
to the HInvoke constructor, and that we always know which dex file it
relates to.
Test: test.py
Change-Id: I625949add88a6b97e1dafeb7aed37961e105d6aa
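
For context, a minimal sketch of the type the nodes now carry instead of
a bare uint32_t index. This is simplified from ART's
dex/method_reference.h; the field and method names below all appear in
the diff, but the flattened layout and the PrettyMethod() body are
illustrative stand-ins, not the real declaration:

  #include <cstdint>
  #include <string>

  class DexFile;  // ART's in-memory view of one .dex file.

  struct MethodReference {
    MethodReference(const DexFile* file, uint32_t method_index)
        : dex_file(file), index(method_index) {}

    // Stub for illustration only: the real implementation resolves
    // `index` against `dex_file` to a human-readable method name.
    std::string PrettyMethod(bool with_signature = true) const {
      (void)with_signature;
      return "<method#" + std::to_string(index) + ">";
    }

    const DexFile* dex_file;  // Which dex file `index` points into.
    uint32_t index;           // Method index within that dex file.
  };

Carrying the dex file next to the index is what lets call sites below use
invoke->GetMethodReference().PrettyMethod() instead of the old
caller_dex_file.PrettyMethod(invoke->GetDexMethodIndex()) pattern.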
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 901424f..4f1ab75 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -573,7 +573,8 @@
void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
- MoveConstant(temp, invoke->GetDexMethodIndex());
+ MethodReference method_reference(invoke->GetMethodReference());
+ MoveConstant(temp, method_reference.index);
// The access check is unnecessary but we do not want to introduce
// extra entrypoints for the codegens that do not support some
@@ -602,7 +603,8 @@
InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
}
void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) {
- MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetDexMethodIndex());
+ MethodReference method_reference(invoke->GetMethodReference());
+ MoveConstant(invoke->GetLocations()->GetTemp(0), method_reference.index);
// Initialize to anything to silence compiler warnings.
QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index ced16fb..9b9a878 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4350,7 +4350,7 @@
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope scratch_scope(masm);
scratch_scope.Exclude(ip1);
- __ Mov(ip1, invoke->GetDexMethodIndex());
+ __ Mov(ip1, invoke->GetMethodReference().index);
__ Ldr(temp,
MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
@@ -4436,11 +4436,12 @@
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
// Add ADRP with its PC-relative method patch.
- vixl::aarch64::Label* adrp_label = NewBootImageMethodPatch(invoke->GetTargetMethod());
+ vixl::aarch64::Label* adrp_label =
+ NewBootImageMethodPatch(invoke->GetResolvedMethodReference());
EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
// Add ADD with its PC-relative method patch.
vixl::aarch64::Label* add_label =
- NewBootImageMethodPatch(invoke->GetTargetMethod(), adrp_label);
+ NewBootImageMethodPatch(invoke->GetResolvedMethodReference(), adrp_label);
EmitAddPlaceholder(add_label, XRegisterFrom(temp), XRegisterFrom(temp));
break;
}
@@ -4457,12 +4458,11 @@
}
case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
// Add ADRP with its PC-relative .bss entry patch.
- MethodReference target_method(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex());
- vixl::aarch64::Label* adrp_label = NewMethodBssEntryPatch(target_method);
+ vixl::aarch64::Label* adrp_label = NewMethodBssEntryPatch(invoke->GetMethodReference());
EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
// Add LDR with its PC-relative .bss entry patch.
vixl::aarch64::Label* ldr_label =
- NewMethodBssEntryPatch(target_method, adrp_label);
+ NewMethodBssEntryPatch(invoke->GetMethodReference(), adrp_label);
// All aligned loads are implicitly atomic consume operations on ARM64.
EmitLdrOffsetPlaceholder(ldr_label, XRegisterFrom(temp), XRegisterFrom(temp));
break;
@@ -4811,7 +4811,7 @@
if (GetCompilerOptions().IsBootImage()) {
DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
// Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
- MethodReference target_method = invoke->GetTargetMethod();
+ MethodReference target_method = invoke->GetResolvedMethodReference();
dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
// Add ADRP with its PC-relative type patch.
vixl::aarch64::Label* adrp_label = NewBootImageTypePatch(*target_method.dex_file, type_idx);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 5c6f835..dce6528 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3515,7 +3515,7 @@
// internally for the duration of the macro instruction.
UseScratchRegisterScope temps(GetVIXLAssembler());
temps.Exclude(hidden_reg);
- __ Mov(hidden_reg, invoke->GetDexMethodIndex());
+ __ Mov(hidden_reg, invoke->GetMethodReference().index);
}
{
// Ensure the pc position is recorded immediately after the `blx` instruction.
@@ -9070,7 +9070,7 @@
break;
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
- PcRelativePatchInfo* labels = NewBootImageMethodPatch(invoke->GetTargetMethod());
+ PcRelativePatchInfo* labels = NewBootImageMethodPatch(invoke->GetResolvedMethodReference());
vixl32::Register temp_reg = RegisterFrom(temp);
EmitMovwMovtPlaceholder(labels, temp_reg);
break;
@@ -9084,8 +9084,7 @@
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
- PcRelativePatchInfo* labels = NewMethodBssEntryPatch(
- MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
+ PcRelativePatchInfo* labels = NewMethodBssEntryPatch(invoke->GetMethodReference());
vixl32::Register temp_reg = RegisterFrom(temp);
EmitMovwMovtPlaceholder(labels, temp_reg);
// All aligned loads are implicitly atomic consume operations on ARM.
@@ -9350,7 +9349,7 @@
if (GetCompilerOptions().IsBootImage()) {
DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
// Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
- MethodReference target_method = invoke->GetTargetMethod();
+ MethodReference target_method = invoke->GetResolvedMethodReference();
dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
PcRelativePatchInfo* labels = NewBootImageTypePatch(*target_method.dex_file, type_idx);
EmitMovwMovtPlaceholder(labels, argument);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d38e3e2..4e19941 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2580,7 +2580,7 @@
// Set the hidden argument. This is safe to do here, as XMM7
// won't be modified thereafter, before the `call` instruction.
DCHECK_EQ(XMM7, hidden_reg);
- __ movl(temp, Immediate(invoke->GetDexMethodIndex()));
+ __ movl(temp, Immediate(invoke->GetMethodReference().index));
__ movd(hidden_reg, temp);
if (receiver.IsStackSlot()) {
@@ -5312,16 +5312,21 @@
HX86ComputeBaseMethodAddress* method_address =
invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
boot_image_method_patches_.emplace_back(
- method_address, invoke->GetTargetMethod().dex_file, invoke->GetTargetMethod().index);
+ method_address,
+ invoke->GetResolvedMethodReference().dex_file,
+ invoke->GetResolvedMethodReference().index);
__ Bind(&boot_image_method_patches_.back().label);
}
void CodeGeneratorX86::RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke) {
+ DCHECK(IsSameDexFile(GetGraph()->GetDexFile(), *invoke->GetMethodReference().dex_file));
HX86ComputeBaseMethodAddress* method_address =
invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
// Add the patch entry and bind its label at the end of the instruction.
method_bss_entry_patches_.emplace_back(
- method_address, &GetGraph()->GetDexFile(), invoke->GetDexMethodIndex());
+ method_address,
+ invoke->GetMethodReference().dex_file,
+ invoke->GetMethodReference().index);
__ Bind(&method_bss_entry_patches_.back().label);
}
@@ -5399,7 +5404,7 @@
Register method_address_reg =
invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).AsRegister<Register>();
__ leal(argument, Address(method_address_reg, CodeGeneratorX86::kPlaceholder32BitOffset));
- MethodReference target_method = invoke->GetTargetMethod();
+ MethodReference target_method = invoke->GetResolvedMethodReference();
dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
boot_image_type_patches_.emplace_back(method_address, target_method.dex_file, type_idx.index_);
__ Bind(&boot_image_type_patches_.back().label);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 0ed982a..210b379 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1143,13 +1143,15 @@
}
void CodeGeneratorX86_64::RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke) {
- boot_image_method_patches_.emplace_back(
- invoke->GetTargetMethod().dex_file, invoke->GetTargetMethod().index);
+ boot_image_method_patches_.emplace_back(invoke->GetResolvedMethodReference().dex_file,
+ invoke->GetResolvedMethodReference().index);
__ Bind(&boot_image_method_patches_.back().label);
}
void CodeGeneratorX86_64::RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke) {
- method_bss_entry_patches_.emplace_back(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex());
+ DCHECK(IsSameDexFile(GetGraph()->GetDexFile(), *invoke->GetMethodReference().dex_file));
+ method_bss_entry_patches_.emplace_back(invoke->GetMethodReference().dex_file,
+ invoke->GetMethodReference().index);
__ Bind(&method_bss_entry_patches_.back().label);
}
@@ -1205,7 +1207,7 @@
// Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
__ leal(argument,
Address::Absolute(CodeGeneratorX86_64::kPlaceholder32BitOffset, /* no_rip= */ false));
- MethodReference target_method = invoke->GetTargetMethod();
+ MethodReference target_method = invoke->GetResolvedMethodReference();
dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
boot_image_type_patches_.emplace_back(target_method.dex_file, type_idx.index_);
__ Bind(&boot_image_type_patches_.back().label);
@@ -2743,7 +2745,7 @@
// won't be modified thereafter, before the `call` instruction.
// We also do it after MaybeGenerateInlineCache that may use RAX.
DCHECK_EQ(RAX, hidden_reg.AsRegister());
- codegen_->Load64BitValue(hidden_reg, invoke->GetDexMethodIndex());
+ codegen_->Load64BitValue(hidden_reg, invoke->GetMethodReference().index);
// temp = temp->GetAddressOfIMT()
__ movq(temp,
diff --git a/compiler/optimizing/critical_native_abi_fixup_arm.cc b/compiler/optimizing/critical_native_abi_fixup_arm.cc
index 8441423..94a9730 100644
--- a/compiler/optimizing/critical_native_abi_fixup_arm.cc
+++ b/compiler/optimizing/critical_native_abi_fixup_arm.cc
@@ -69,7 +69,7 @@
/*number_of_arguments=*/ 1u,
converted_type,
invoke->GetDexPc(),
- /*method_index=*/ dex::kDexNoIndex,
+ /*method_reference=*/ MethodReference(nullptr, dex::kDexNoIndex),
resolved_method,
dispatch_info,
kStatic,
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 922a6f6..d5840fc 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -456,7 +456,7 @@
}
void VisitInvoke(HInvoke* invoke) override {
- StartAttributeStream("dex_file_index") << invoke->GetDexMethodIndex();
+ StartAttributeStream("dex_file_index") << invoke->GetMethodReference().index;
ArtMethod* method = invoke->GetResolvedMethod();
// We don't print signatures, which conflict with c1visualizer format.
static constexpr bool kWithSignature = false;
@@ -464,7 +464,7 @@
// other invokes might be coming from inlined methods.
ScopedObjectAccess soa(Thread::Current());
std::string method_name = (method == nullptr)
- ? GetGraph()->GetDexFile().PrettyMethod(invoke->GetDexMethodIndex(), kWithSignature)
+ ? invoke->GetMethodReference().PrettyMethod(kWithSignature)
: method->PrettyMethod(kWithSignature);
StartAttributeStream("method_name") << method_name;
StartAttributeStream("always_throws") << std::boolalpha
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 4530f1d..b9f40a0 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -173,8 +173,8 @@
if (call != nullptr && call->GetIntrinsic() == Intrinsics::kNone) {
if (honor_noinline_directives) {
// Debugging case: directives in method names control or assert on inlining.
- std::string callee_name = outer_compilation_unit_.GetDexFile()->PrettyMethod(
- call->GetDexMethodIndex(), /* with_signature= */ false);
+ std::string callee_name =
+ call->GetMethodReference().PrettyMethod(/* with_signature= */ false);
// Tests prevent inlining by having $noinline$ in their method names.
if (callee_name.find("$noinline$") == std::string::npos) {
if (TryInline(call)) {
@@ -209,8 +209,9 @@
* the actual runtime target of an interface or virtual call.
* Return nullptr if the runtime target cannot be proven.
*/
-static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resolved_method)
+static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* resolved_method = invoke->GetResolvedMethod();
if (IsMethodOrDeclaringClassFinal(resolved_method)) {
// No need to lookup further, the resolved method will be the target.
return resolved_method;
@@ -438,19 +439,16 @@
}
ArtMethod* HInliner::FindActualCallTarget(HInvoke* invoke_instruction, bool* cha_devirtualize) {
- ArtMethod* resolved_method = invoke_instruction->GetResolvedMethod();
- DCHECK(resolved_method != nullptr);
-
ArtMethod* actual_method = nullptr;
if (invoke_instruction->IsInvokeStaticOrDirect()) {
- actual_method = resolved_method;
+ actual_method = invoke_instruction->GetResolvedMethod();
} else {
// Check if we can statically find the method.
- actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method);
+ actual_method = FindVirtualOrInterfaceTarget(invoke_instruction);
}
if (actual_method == nullptr) {
- ArtMethod* method = TryCHADevirtualization(resolved_method);
+ ArtMethod* method = TryCHADevirtualization(invoke_instruction->GetResolvedMethod());
if (method != nullptr) {
*cha_devirtualize = true;
actual_method = method;
@@ -478,9 +476,7 @@
}
ScopedObjectAccess soa(Thread::Current());
- uint32_t method_index = invoke_instruction->GetDexMethodIndex();
- const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
- LOG_TRY() << caller_dex_file.PrettyMethod(method_index);
+ LOG_TRY() << invoke_instruction->GetMethodReference().PrettyMethod();
ArtMethod* resolved_method = invoke_instruction->GetResolvedMethod();
if (resolved_method == nullptr) {
@@ -496,8 +492,7 @@
// If we didn't find a method, see if we can inline from the inline caches.
if (actual_method == nullptr) {
DCHECK(!invoke_instruction->IsInvokeStaticOrDirect());
-
- return TryInlineFromInlineCache(caller_dex_file, invoke_instruction, resolved_method);
+ return TryInlineFromInlineCache(invoke_instruction);
}
// Single target.
@@ -574,9 +569,7 @@
// We may come from the interpreter and it may have seen different receiver types.
return Runtime::Current()->IsAotCompiler() || outermost_graph_->IsCompilingOsr();
}
-bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file,
- HInvoke* invoke_instruction,
- ArtMethod* resolved_method)
+bool HInliner::TryInlineFromInlineCache(HInvoke* invoke_instruction)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current()->IsAotCompiler() && !kUseAOTInlineCaches) {
return false;
@@ -588,21 +581,21 @@
// for it.
InlineCacheType inline_cache_type =
(Runtime::Current()->IsAotCompiler() || Runtime::Current()->IsZygote())
- ? GetInlineCacheAOT(caller_dex_file, invoke_instruction, &hs, &inline_cache)
+ ? GetInlineCacheAOT(invoke_instruction, &hs, &inline_cache)
: GetInlineCacheJIT(invoke_instruction, &hs, &inline_cache);
switch (inline_cache_type) {
case kInlineCacheNoData: {
LOG_FAIL_NO_STAT()
<< "No inline cache information for call to "
- << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex());
+ << invoke_instruction->GetMethodReference().PrettyMethod();
return false;
}
case kInlineCacheUninitialized: {
LOG_FAIL_NO_STAT()
<< "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << invoke_instruction->GetMethodReference().PrettyMethod()
<< " is not hit and not inlined";
return false;
}
@@ -610,21 +603,21 @@
case kInlineCacheMonomorphic: {
MaybeRecordStat(stats_, MethodCompilationStat::kMonomorphicCall);
if (UseOnlyPolymorphicInliningWithNoDeopt()) {
- return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
+ return TryInlinePolymorphicCall(invoke_instruction, inline_cache);
} else {
- return TryInlineMonomorphicCall(invoke_instruction, resolved_method, inline_cache);
+ return TryInlineMonomorphicCall(invoke_instruction, inline_cache);
}
}
case kInlineCachePolymorphic: {
MaybeRecordStat(stats_, MethodCompilationStat::kPolymorphicCall);
- return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
+ return TryInlinePolymorphicCall(invoke_instruction, inline_cache);
}
case kInlineCacheMegamorphic: {
LOG_FAIL_NO_STAT()
<< "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << invoke_instruction->GetMethodReference().PrettyMethod()
<< " is megamorphic and not inlined";
MaybeRecordStat(stats_, MethodCompilationStat::kMegamorphicCall);
return false;
@@ -633,7 +626,7 @@
case kInlineCacheMissingTypes: {
LOG_FAIL_NO_STAT()
<< "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << invoke_instruction->GetMethodReference().PrettyMethod()
<< " is missing types and not inlined";
return false;
}
@@ -671,7 +664,6 @@
}
HInliner::InlineCacheType HInliner::GetInlineCacheAOT(
- const DexFile& caller_dex_file,
HInvoke* invoke_instruction,
StackHandleScope<1>* hs,
/*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
@@ -683,7 +675,7 @@
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> offline_profile =
pci->GetHotMethodInfo(MethodReference(
- &caller_dex_file, caller_compilation_unit_.GetDexMethodIndex()));
+ caller_compilation_unit_.GetDexFile(), caller_compilation_unit_.GetDexMethodIndex()));
if (offline_profile == nullptr) {
return kInlineCacheNoData; // no profile information for this invocation.
}
@@ -762,8 +754,8 @@
inline_cache->Set(ic_index++, clazz);
} else {
VLOG(compiler) << "Could not resolve class from inline cache in AOT mode "
- << caller_compilation_unit_.GetDexFile()->PrettyMethod(
- invoke_instruction->GetDexMethodIndex()) << " : "
+ << invoke_instruction->GetMethodReference().PrettyMethod()
+ << " : "
<< caller_compilation_unit_
.GetDexFile()->StringByTypeIdx(class_ref.type_index);
return kInlineCacheMissingTypes;
@@ -793,10 +785,10 @@
}
static ArtMethod* ResolveMethodFromInlineCache(Handle<mirror::Class> klass,
- ArtMethod* resolved_method,
- HInstruction* invoke_instruction,
+ HInvoke* invoke_instruction,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* resolved_method = invoke_instruction->GetResolvedMethod();
if (Runtime::Current()->IsAotCompiler()) {
// We can get unrelated types when working with profiles (corruption,
// system updates, or anyone can write to it). So first check if the class
@@ -819,7 +811,6 @@
}
bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
- ArtMethod* resolved_method,
Handle<mirror::ObjectArray<mirror::Class>> classes) {
DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
<< invoke_instruction->DebugName();
@@ -828,7 +819,7 @@
GetMonomorphicType(classes), caller_compilation_unit_);
if (!class_index.IsValid()) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedDexCache)
- << "Call to " << ArtMethod::PrettyMethod(resolved_method)
+ << "Call to " << ArtMethod::PrettyMethod(invoke_instruction->GetResolvedMethod())
<< " from inline cache is not inlined because its class is not"
<< " accessible to the caller";
return false;
@@ -838,8 +829,8 @@
PointerSize pointer_size = class_linker->GetImagePointerSize();
Handle<mirror::Class> monomorphic_type =
graph_->GetHandleCache()->NewHandle(GetMonomorphicType(classes));
- resolved_method = ResolveMethodFromInlineCache(
- monomorphic_type, resolved_method, invoke_instruction, pointer_size);
+ ArtMethod* resolved_method = ResolveMethodFromInlineCache(
+ monomorphic_type, invoke_instruction, pointer_size);
LOG_NOTE() << "Try inline monomorphic call to " << resolved_method->PrettyMethod();
if (resolved_method == nullptr) {
@@ -980,12 +971,11 @@
}
bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
- ArtMethod* resolved_method,
Handle<mirror::ObjectArray<mirror::Class>> classes) {
DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
<< invoke_instruction->DebugName();
- if (TryInlinePolymorphicCallToSameTarget(invoke_instruction, resolved_method, classes)) {
+ if (TryInlinePolymorphicCallToSameTarget(invoke_instruction, classes)) {
return true;
}
@@ -1001,8 +991,7 @@
ArtMethod* method = nullptr;
Handle<mirror::Class> handle = graph_->GetHandleCache()->NewHandle(classes->Get(i));
- method = ResolveMethodFromInlineCache(
- handle, resolved_method, invoke_instruction, pointer_size);
+ method = ResolveMethodFromInlineCache(handle, invoke_instruction, pointer_size);
if (method == nullptr) {
DCHECK(Runtime::Current()->IsAotCompiler());
// AOT profile is bogus. This loop expects to iterate over all entries,
@@ -1027,7 +1016,8 @@
} else {
one_target_inlined = true;
- LOG_SUCCESS() << "Polymorphic call to " << ArtMethod::PrettyMethod(resolved_method)
+ LOG_SUCCESS() << "Polymorphic call to "
+ << invoke_instruction->GetMethodReference().PrettyMethod()
<< " has inlined " << ArtMethod::PrettyMethod(method);
// If we have inlined all targets before, and this receiver is the last seen,
@@ -1060,7 +1050,7 @@
if (!one_target_inlined) {
LOG_FAIL_NO_STAT()
- << "Call to " << ArtMethod::PrettyMethod(resolved_method)
+ << "Call to " << invoke_instruction->GetMethodReference().PrettyMethod()
<< " from inline cache is not inlined because none"
<< " of its targets could be inlined";
return false;
@@ -1155,7 +1145,6 @@
bool HInliner::TryInlinePolymorphicCallToSameTarget(
HInvoke* invoke_instruction,
- ArtMethod* resolved_method,
Handle<mirror::ObjectArray<mirror::Class>> classes) {
// This optimization only works under JIT for now.
if (!codegen_->GetCompilerOptions().IsJitCompiler()) {
@@ -1165,7 +1154,6 @@
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
PointerSize pointer_size = class_linker->GetImagePointerSize();
- DCHECK(resolved_method != nullptr);
ArtMethod* actual_method = nullptr;
size_t method_index = invoke_instruction->IsInvokeVirtual()
? invoke_instruction->AsInvokeVirtual()->GetVTableIndex()
@@ -1300,7 +1288,7 @@
invoke_instruction->GetNumberOfArguments(),
invoke_instruction->GetType(),
invoke_instruction->GetDexPc(),
- invoke_instruction->GetDexMethodIndex(), // Use interface method's dex method index.
+ invoke_instruction->GetMethodReference(), // Use interface method's reference.
method,
method->GetMethodIndex());
DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone);
@@ -1338,9 +1326,10 @@
return false;
}
- const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
uint32_t dex_method_index = FindMethodIndexIn(
- method, caller_dex_file, invoke_instruction->GetDexMethodIndex());
+ method,
+ *invoke_instruction->GetMethodReference().dex_file,
+ invoke_instruction->GetMethodReference().index);
if (dex_method_index == dex::kDexNoIndex) {
return false;
}
@@ -1349,7 +1338,7 @@
invoke_instruction->GetNumberOfArguments(),
invoke_instruction->GetType(),
invoke_instruction->GetDexPc(),
- dex_method_index,
+ MethodReference(invoke_instruction->GetMethodReference().dex_file, dex_method_index),
method,
method->GetMethodIndex());
HInputsRef inputs = invoke_instruction->GetInputs();
@@ -1568,10 +1557,10 @@
// Try to recognize known simple patterns and replace the invoke call with appropriate instructions.
bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
- ArtMethod* resolved_method,
+ ArtMethod* method,
HInstruction** return_replacement) {
InlineMethod inline_method;
- if (!InlineMethodAnalyser::AnalyseMethodCode(resolved_method, &inline_method)) {
+ if (!InlineMethodAnalyser::AnalyseMethodCode(method, &inline_method)) {
return false;
}
@@ -1585,7 +1574,7 @@
inline_method.d.return_data.arg);
break;
case kInlineOpNonWideConst:
- if (resolved_method->GetShorty()[0] == 'L') {
+ if (method->GetShorty()[0] == 'L') {
DCHECK_EQ(inline_method.d.data, 0u);
*return_replacement = graph_->GetNullConstant();
} else {
@@ -1599,7 +1588,7 @@
return false;
}
HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, data.object_arg);
- HInstanceFieldGet* iget = CreateInstanceFieldGet(data.field_idx, resolved_method, obj);
+ HInstanceFieldGet* iget = CreateInstanceFieldGet(data.field_idx, method, obj);
DCHECK_EQ(iget->GetFieldOffset().Uint32Value(), data.field_offset);
DCHECK_EQ(iget->IsVolatile() ? 1u : 0u, data.is_volatile);
invoke_instruction->GetBlock()->InsertInstructionBefore(iget, invoke_instruction);
@@ -1614,7 +1603,7 @@
}
HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, data.object_arg);
HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, data.src_arg);
- HInstanceFieldSet* iput = CreateInstanceFieldSet(data.field_idx, resolved_method, obj, value);
+ HInstanceFieldSet* iput = CreateInstanceFieldSet(data.field_idx, method, obj, value);
DCHECK_EQ(iput->GetFieldOffset().Uint32Value(), data.field_offset);
DCHECK_EQ(iput->IsVolatile() ? 1u : 0u, data.is_volatile);
invoke_instruction->GetBlock()->InsertInstructionBefore(iput, invoke_instruction);
@@ -1657,7 +1646,7 @@
uint16_t field_index = iput_field_indexes[i];
bool is_final;
HInstanceFieldSet* iput =
- CreateInstanceFieldSet(field_index, resolved_method, obj, value, &is_final);
+ CreateInstanceFieldSet(field_index, method, obj, value, &is_final);
invoke_instruction->GetBlock()->InsertInstructionBefore(iput, invoke_instruction);
// Check whether the field is final. If it is, we need to add a barrier.
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 6510857..9041c7a 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -118,7 +118,7 @@
// Try to recognize known simple patterns and replace the invoke call with appropriate instructions.
bool TryPatternSubstitution(HInvoke* invoke_instruction,
- ArtMethod* resolved_method,
+ ArtMethod* method,
HInstruction** return_replacement)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -166,10 +166,7 @@
bool* is_final = nullptr);
// Try inlining the invoke instruction using inline caches.
- bool TryInlineFromInlineCache(
- const DexFile& caller_dex_file,
- HInvoke* invoke_instruction,
- ArtMethod* resolved_method)
+ bool TryInlineFromInlineCache(HInvoke* invoke_instruction)
REQUIRES_SHARED(Locks::mutator_lock_);
// Try getting the inline cache from JIT code cache.
@@ -184,7 +181,7 @@
// Try getting the inline cache from AOT offline profile.
// Return true if the inline cache was successfully allocated and the
// invoke info was found in the profile info.
- InlineCacheType GetInlineCacheAOT(const DexFile& caller_dex_file,
+ InlineCacheType GetInlineCacheAOT(
HInvoke* invoke_instruction,
StackHandleScope<1>* hs,
/*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
@@ -209,18 +206,15 @@
// if (receiver.getClass() != ic.GetMonomorphicType()) deopt
// ... // inlined code
bool TryInlineMonomorphicCall(HInvoke* invoke_instruction,
- ArtMethod* resolved_method,
Handle<mirror::ObjectArray<mirror::Class>> classes)
REQUIRES_SHARED(Locks::mutator_lock_);
// Try to inline targets of a polymorphic call.
bool TryInlinePolymorphicCall(HInvoke* invoke_instruction,
- ArtMethod* resolved_method,
Handle<mirror::ObjectArray<mirror::Class>> classes)
REQUIRES_SHARED(Locks::mutator_lock_);
bool TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
- ArtMethod* resolved_method,
Handle<mirror::ObjectArray<mirror::Class>> classes)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index d264bee..3401e65 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -496,7 +496,7 @@
number_of_arguments,
return_type_,
kNoDexPc,
- method_idx,
+ target_method,
method,
dispatch_info,
invoke_type,
@@ -874,7 +874,7 @@
ArtMethod* referrer,
const DexCompilationUnit& dex_compilation_unit,
/*inout*/InvokeType* invoke_type,
- /*out*/MethodReference* target_method,
+ /*out*/MethodReference* method_info,
/*out*/bool* is_string_constructor) {
ScopedObjectAccess soa(Thread::Current());
@@ -989,18 +989,18 @@
if (*invoke_type == kDirect || *invoke_type == kStatic || *invoke_type == kSuper) {
// Record the target method needed for HInvokeStaticOrDirect.
- *target_method =
+ *method_info =
MethodReference(resolved_method->GetDexFile(), resolved_method->GetDexMethodIndex());
} else if (*invoke_type == kVirtual) {
// For HInvokeVirtual we need the vtable index.
- *target_method = MethodReference(/*file=*/ nullptr, resolved_method->GetVtableIndex());
+ *method_info = MethodReference(/*file=*/ nullptr, resolved_method->GetVtableIndex());
} else if (*invoke_type == kInterface) {
// For HInvokeInterface we need the IMT index.
- *target_method = MethodReference(/*file=*/ nullptr, ImTable::GetImtIndex(resolved_method));
+ *method_info = MethodReference(/*file=*/ nullptr, ImTable::GetImtIndex(resolved_method));
} else {
// For HInvokePolymorphic we don't need the target method yet
DCHECK_EQ(*invoke_type, kPolymorphic);
- DCHECK(target_method == nullptr);
+ DCHECK(method_info == nullptr);
}
*is_string_constructor =
@@ -1024,15 +1024,16 @@
number_of_arguments++;
}
- MethodReference target_method(nullptr, 0u);
+ MethodReference method_info(nullptr, 0u);
bool is_string_constructor = false;
ArtMethod* resolved_method = ResolveMethod(method_idx,
graph_->GetArtMethod(),
*dex_compilation_unit_,
&invoke_type,
- &target_method,
+ &method_info,
&is_string_constructor);
+ MethodReference method_reference(&graph_->GetDexFile(), method_idx);
if (UNLIKELY(resolved_method == nullptr)) {
DCHECK(!Thread::Current()->IsExceptionPending());
MaybeRecordStat(compilation_stats_,
@@ -1041,7 +1042,7 @@
number_of_arguments,
return_type,
dex_pc,
- method_idx,
+ method_reference,
invoke_type);
return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ true);
}
@@ -1061,11 +1062,11 @@
number_of_arguments - 1,
/* return_type= */ DataType::Type::kReference,
dex_pc,
- method_idx,
+ method_reference,
/* resolved_method= */ nullptr,
dispatch_info,
invoke_type,
- target_method,
+ method_info,
HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit);
return HandleStringInit(invoke, operands, shorty);
}
@@ -1092,13 +1093,12 @@
HInvoke* invoke = nullptr;
if (invoke_type == kDirect || invoke_type == kStatic || invoke_type == kSuper) {
if (invoke_type == kSuper) {
- if (IsSameDexFile(*target_method.dex_file, *dex_compilation_unit_->GetDexFile())) {
+ if (IsSameDexFile(*method_info.dex_file, *dex_compilation_unit_->GetDexFile())) {
// Update the method index to the one resolved. Note that this may be a no-op if
// we resolved to the method referenced by the instruction.
- method_idx = target_method.index;
+ method_reference.index = method_info.index;
}
}
-
HInvokeStaticOrDirect::DispatchInfo dispatch_info =
HSharpening::SharpenInvokeStaticOrDirect(resolved_method, code_generator_);
if (dispatch_info.code_ptr_location ==
@@ -1109,11 +1109,11 @@
number_of_arguments,
return_type,
dex_pc,
- method_idx,
+ method_reference,
resolved_method,
dispatch_info,
invoke_type,
- target_method,
+ method_info,
clinit_check_requirement);
if (clinit_check != nullptr) {
// Add the class initialization check as last input of `invoke`.
@@ -1123,23 +1123,23 @@
invoke->SetArgumentAt(clinit_check_index, clinit_check);
}
} else if (invoke_type == kVirtual) {
- DCHECK(target_method.dex_file == nullptr);
+ DCHECK(method_info.dex_file == nullptr);
invoke = new (allocator_) HInvokeVirtual(allocator_,
number_of_arguments,
return_type,
dex_pc,
- method_idx,
+ method_reference,
resolved_method,
- /*vtable_index=*/ target_method.index);
+ /*vtable_index=*/ method_info.index);
} else {
DCHECK_EQ(invoke_type, kInterface);
invoke = new (allocator_) HInvokeInterface(allocator_,
number_of_arguments,
return_type,
dex_pc,
- method_idx,
+ method_reference,
resolved_method,
- /*imt_index=*/ target_method.index);
+ /*imt_index=*/ method_info.index);
}
return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
}
@@ -1161,17 +1161,17 @@
graph_->GetArtMethod(),
*dex_compilation_unit_,
&invoke_type,
- /* target_method= */ nullptr,
+ /* method_info= */ nullptr,
&is_string_constructor);
+ MethodReference method_reference(&graph_->GetDexFile(), method_idx);
HInvoke* invoke = new (allocator_) HInvokePolymorphic(allocator_,
number_of_arguments,
return_type,
dex_pc,
- method_idx,
+ method_reference,
resolved_method,
proto_idx);
-
if (!HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false)) {
return false;
}
@@ -1203,11 +1203,14 @@
const char* shorty = dex_file_->GetShorty(proto_idx);
DataType::Type return_type = DataType::FromShorty(shorty[0]);
size_t number_of_arguments = strlen(shorty) - 1;
+ // HInvokeCustom takes a kDexNoIndex method reference.
+ MethodReference method_reference(&graph_->GetDexFile(), dex::kDexNoIndex);
HInvoke* invoke = new (allocator_) HInvokeCustom(allocator_,
number_of_arguments,
call_site_idx,
return_type,
- dex_pc);
+ dex_pc,
+ method_reference);
return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index bece0ac..80d4a2f 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4386,8 +4386,6 @@
// inputs at the end of their list of inputs.
uint32_t GetNumberOfArguments() const { return number_of_arguments_; }
- uint32_t GetDexMethodIndex() const { return dex_method_index_; }
-
InvokeType GetInvokeType() const {
return GetPackedField<InvokeTypeField>();
}
@@ -4430,7 +4428,9 @@
bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; }
ArtMethod* GetResolvedMethod() const { return resolved_method_; }
- void SetResolvedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetResolvedMethod(ArtMethod* method);
+
+ MethodReference GetMethodReference() const { return method_reference_; }
DECLARE_ABSTRACT_INSTRUCTION(Invoke);
@@ -4450,7 +4450,7 @@
uint32_t number_of_other_inputs,
DataType::Type return_type,
uint32_t dex_pc,
- uint32_t dex_method_index,
+ MethodReference method_reference,
ArtMethod* resolved_method,
InvokeType invoke_type)
: HVariableInputSizeInstruction(
@@ -4462,13 +4462,11 @@
number_of_arguments + number_of_other_inputs,
kArenaAllocInvokeInputs),
number_of_arguments_(number_of_arguments),
- dex_method_index_(dex_method_index),
+ method_reference_(method_reference),
intrinsic_(Intrinsics::kNone),
intrinsic_optimizations_(0) {
SetPackedField<InvokeTypeField>(invoke_type);
SetPackedFlag<kFlagCanThrow>(true);
- // Check mutator lock, constructors lack annotalysis support.
- Locks::mutator_lock_->AssertNotExclusiveHeld(Thread::Current());
SetResolvedMethod(resolved_method);
}
@@ -4476,7 +4474,7 @@
uint32_t number_of_arguments_;
ArtMethod* resolved_method_;
- const uint32_t dex_method_index_;
+ const MethodReference method_reference_;
Intrinsics intrinsic_;
// A magic word holding optimizations for intrinsics. See intrinsics.h.
@@ -4489,7 +4487,7 @@
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
- uint32_t dex_method_index,
+ MethodReference method_reference,
InvokeType invoke_type)
: HInvoke(kInvokeUnresolved,
allocator,
@@ -4497,7 +4495,7 @@
/* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
- dex_method_index,
+ method_reference,
nullptr,
invoke_type) {
}
@@ -4516,7 +4514,7 @@
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
- uint32_t dex_method_index,
+ MethodReference method_reference,
// resolved_method is the ArtMethod object corresponding to the polymorphic
// method (e.g. VarHandle.get), resolved using the class linker. It is needed
// to pass intrinsic information to the HInvokePolymorphic node.
@@ -4528,7 +4526,7 @@
/* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
- dex_method_index,
+ method_reference,
resolved_method,
kPolymorphic),
proto_idx_(proto_idx) {
@@ -4551,14 +4549,15 @@
uint32_t number_of_arguments,
uint32_t call_site_index,
DataType::Type return_type,
- uint32_t dex_pc)
+ uint32_t dex_pc,
+ MethodReference method_reference)
: HInvoke(kInvokeCustom,
allocator,
number_of_arguments,
/* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
- /* dex_method_index= */ dex::kDexNoIndex,
+ method_reference,
/* resolved_method= */ nullptr,
kStatic),
call_site_index_(call_site_index) {
@@ -4647,11 +4646,11 @@
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
- uint32_t method_index,
+ MethodReference method_reference,
ArtMethod* resolved_method,
DispatchInfo dispatch_info,
InvokeType invoke_type,
- MethodReference target_method,
+ MethodReference resolved_method_reference,
ClinitCheckRequirement clinit_check_requirement)
: HInvoke(kInvokeStaticOrDirect,
allocator,
@@ -4662,10 +4661,10 @@
(clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u),
return_type,
dex_pc,
- method_index,
+ method_reference,
resolved_method,
invoke_type),
- target_method_(target_method),
+ resolved_method_reference_(resolved_method_reference),
dispatch_info_(dispatch_info) {
SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement);
}
@@ -4753,8 +4752,8 @@
return GetInvokeType() == kStatic;
}
- MethodReference GetTargetMethod() const {
- return target_method_;
+ const MethodReference GetResolvedMethodReference() const {
+ return resolved_method_reference_;
}
// Does this method load kind need the current method as an input?
@@ -4856,7 +4855,7 @@
kFieldClinitCheckRequirementSize>;
// Cached values of the resolved method, to avoid needing the mutator lock.
- const MethodReference target_method_;
+ const MethodReference resolved_method_reference_;
DispatchInfo dispatch_info_;
};
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs);
@@ -4869,7 +4868,7 @@
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
- uint32_t dex_method_index,
+ MethodReference method_reference,
ArtMethod* resolved_method,
uint32_t vtable_index)
: HInvoke(kInvokeVirtual,
@@ -4878,7 +4877,7 @@
0u,
return_type,
dex_pc,
- dex_method_index,
+ method_reference,
resolved_method,
kVirtual),
vtable_index_(vtable_index) {
@@ -4931,7 +4930,7 @@
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
- uint32_t dex_method_index,
+ MethodReference method_reference,
ArtMethod* resolved_method,
uint32_t imt_index)
: HInvoke(kInvokeInterface,
@@ -4940,7 +4939,7 @@
0u,
return_type,
dex_pc,
- dex_method_index,
+ method_reference,
resolved_method,
kInterface),
imt_index_(imt_index) {
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index fd7412c..5d80690 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -526,8 +526,8 @@
ClassLinker* cl = Runtime::Current()->GetClassLinker();
Thread* self = Thread::Current();
StackHandleScope<2> hs(self);
- const DexFile& dex_file = *invoke->GetTargetMethod().dex_file;
- uint32_t dex_method_index = invoke->GetTargetMethod().index;
+ const DexFile& dex_file = *invoke->GetResolvedMethodReference().dex_file;
+ uint32_t dex_method_index = invoke->GetResolvedMethodReference().index;
Handle<mirror::DexCache> dex_cache(
hs.NewHandle(FindDexCacheWithHint(self, dex_file, hint_dex_cache_)));
// Use a null loader, the target method is in a boot classpath dex file.