diff options
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.cc              | 150
-rw-r--r--  compiler/optimizing/builder.h               |   8
-rw-r--r--  compiler/optimizing/code_generator_arm.cc   |  12
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc |  11
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc|  12
-rw-r--r--  compiler/optimizing/code_generator_x86.cc   |  17
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc|  13
-rw-r--r--  compiler/optimizing/graph_visualizer.cc     |   5
-rw-r--r--  compiler/optimizing/inliner.cc              |   4
-rw-r--r--  compiler/optimizing/intrinsics.cc           |   2
-rw-r--r--  compiler/optimizing/nodes.h                 |  15
-rw-r--r--  compiler/optimizing/sharpening.cc           |  16
12 files changed, 189 insertions, 76 deletions
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc index d7754e8ea9..8e75bdcdc9 100644 --- a/compiler/optimizing/builder.cc +++ b/compiler/optimizing/builder.cc @@ -735,6 +735,79 @@ static InvokeType GetInvokeTypeFromOpCode(Instruction::Code opcode) { } } +ArtMethod* HGraphBuilder::ResolveMethod(uint16_t method_idx, InvokeType invoke_type) { + ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<2> hs(soa.Self()); + + ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker(); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle( + soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader()))); + Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass())); + + ArtMethod* resolved_method = class_linker->ResolveMethod( + *dex_compilation_unit_->GetDexFile(), + method_idx, + dex_compilation_unit_->GetDexCache(), + class_loader, + /* referrer */ nullptr, + invoke_type); + + if (UNLIKELY(resolved_method == nullptr)) { + // Clean up any exception left by type resolution. + soa.Self()->ClearException(); + return nullptr; + } + + // Check access. The class linker has a fast path for looking into the dex cache + // and does not check the access if it hits it. + if (compiling_class.Get() == nullptr) { + if (!resolved_method->IsPublic()) { + return nullptr; + } + } else if (!compiling_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(), + resolved_method, + dex_compilation_unit_->GetDexCache().Get(), + method_idx)) { + return nullptr; + } + + // We have to special case the invoke-super case, as ClassLinker::ResolveMethod does not. + // We need to look at the referrer's super class vtable. + if (invoke_type == kSuper) { + if (compiling_class.Get() == nullptr) { + // Invoking a super method requires knowing the actual super class. If we did not resolve + // the compiling method's declaring class (which only happens for ahead of time compilation), + // bail out. 
+ DCHECK(Runtime::Current()->IsAotCompiler()); + return nullptr; + } + uint16_t vtable_index = resolved_method->GetMethodIndex(); + ArtMethod* actual_method = compiling_class->GetSuperClass()->GetVTableEntry( + vtable_index, class_linker->GetImagePointerSize()); + if (actual_method != resolved_method && + !IsSameDexFile(*resolved_method->GetDexFile(), *dex_compilation_unit_->GetDexFile())) { + // TODO: The actual method could still be referenced in the current dex file, so we + // could try locating it. + // TODO: Remove the dex_file restriction. + return nullptr; + } + if (!actual_method->IsInvokable()) { + // Fail if the actual method cannot be invoked. Otherwise, the runtime resolution stub + // could resolve the callee to the wrong method. + return nullptr; + } + resolved_method = actual_method; + } + + // Check for incompatible class changes. The class linker has a fast path for + // looking into the dex cache and does not check incompatible class changes if it hits it. + if (resolved_method->CheckIncompatibleClassChange(invoke_type)) { + return nullptr; + } + + return resolved_method; +} + bool HGraphBuilder::BuildInvoke(const Instruction& instruction, uint32_t dex_pc, uint32_t method_idx, @@ -742,22 +815,18 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction, bool is_range, uint32_t* args, uint32_t register_index) { - InvokeType original_invoke_type = GetInvokeTypeFromOpCode(instruction.Opcode()); - InvokeType optimized_invoke_type = original_invoke_type; + InvokeType invoke_type = GetInvokeTypeFromOpCode(instruction.Opcode()); const char* descriptor = dex_file_->GetMethodShorty(method_idx); Primitive::Type return_type = Primitive::GetType(descriptor[0]); // Remove the return type from the 'proto'. size_t number_of_arguments = strlen(descriptor) - 1; - if (original_invoke_type != kStatic) { // instance call + if (invoke_type != kStatic) { // instance call // One extra argument for 'this'. 
number_of_arguments++; } MethodReference target_method(dex_file_, method_idx); - int32_t table_index = 0; - uintptr_t direct_code = 0; - uintptr_t direct_method = 0; // Special handling for string init. int32_t string_init_offset = 0; @@ -780,7 +849,7 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction, method_idx, target_method, dispatch_info, - original_invoke_type, + invoke_type, kStatic /* optimized_invoke_type */, HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit); return HandleStringInit(invoke, @@ -791,23 +860,16 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction, descriptor); } - // Handle unresolved methods. - if (!compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, - dex_pc, - true /* update_stats */, - true /* enable_devirtualization */, - &optimized_invoke_type, - &target_method, - &table_index, - &direct_code, - &direct_method)) { + ArtMethod* resolved_method = ResolveMethod(method_idx, invoke_type); + + if (resolved_method == nullptr) { MaybeRecordStat(MethodCompilationStat::kUnresolvedMethod); HInvoke* invoke = new (arena_) HInvokeUnresolved(arena_, number_of_arguments, return_type, dex_pc, method_idx, - original_invoke_type); + invoke_type); return HandleInvoke(invoke, number_of_vreg_arguments, args, @@ -817,21 +879,26 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction, nullptr /* clinit_check */); } - // Handle resolved methods (non string init). - - DCHECK(optimized_invoke_type != kSuper); - // Potential class initialization check, in the case of a static method call. HClinitCheck* clinit_check = nullptr; HInvoke* invoke = nullptr; - if (optimized_invoke_type == kDirect || optimized_invoke_type == kStatic) { + if (invoke_type == kDirect || invoke_type == kStatic || invoke_type == kSuper) { // By default, consider that the called method implicitly requires // an initialization check of its declaring method. 
HInvokeStaticOrDirect::ClinitCheckRequirement clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit; - if (optimized_invoke_type == kStatic) { - clinit_check = ProcessClinitCheckForInvoke(dex_pc, method_idx, &clinit_check_requirement); + ScopedObjectAccess soa(Thread::Current()); + if (invoke_type == kStatic) { + clinit_check = ProcessClinitCheckForInvoke( + dex_pc, resolved_method, method_idx, &clinit_check_requirement); + } else if (invoke_type == kSuper) { + if (IsSameDexFile(*resolved_method->GetDexFile(), *dex_compilation_unit_->GetDexFile())) { + // Update the target method to the one resolved. Note that this may be a no-op if + // we resolved to the method referenced by the instruction. + method_idx = resolved_method->GetDexMethodIndex(); + target_method = MethodReference(dex_file_, method_idx); + } } HInvokeStaticOrDirect::DispatchInfo dispatch_info = { @@ -847,24 +914,26 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction, method_idx, target_method, dispatch_info, - original_invoke_type, - optimized_invoke_type, + invoke_type, + invoke_type, clinit_check_requirement); - } else if (optimized_invoke_type == kVirtual) { + } else if (invoke_type == kVirtual) { + ScopedObjectAccess soa(Thread::Current()); // Needed for the method index invoke = new (arena_) HInvokeVirtual(arena_, number_of_arguments, return_type, dex_pc, method_idx, - table_index); + resolved_method->GetMethodIndex()); } else { - DCHECK_EQ(optimized_invoke_type, kInterface); + DCHECK_EQ(invoke_type, kInterface); + ScopedObjectAccess soa(Thread::Current()); // Needed for the method index invoke = new (arena_) HInvokeInterface(arena_, number_of_arguments, return_type, dex_pc, method_idx, - table_index); + resolved_method->GetDexMethodIndex()); } return HandleInvoke(invoke, @@ -962,23 +1031,18 @@ bool HGraphBuilder::IsInitialized(Handle<mirror::Class> cls) const { HClinitCheck* HGraphBuilder::ProcessClinitCheckForInvoke( uint32_t dex_pc, + ArtMethod* 
resolved_method, uint32_t method_idx, HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) { - ScopedObjectAccess soa(Thread::Current()); - StackHandleScope<5> hs(soa.Self()); + const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile(); + Thread* self = Thread::Current(); + StackHandleScope<4> hs(self); Handle<mirror::DexCache> dex_cache(hs.NewHandle( dex_compilation_unit_->GetClassLinker()->FindDexCache( - soa.Self(), *dex_compilation_unit_->GetDexFile()))); - Handle<mirror::ClassLoader> class_loader(hs.NewHandle( - soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader()))); - ArtMethod* resolved_method = compiler_driver_->ResolveMethod( - soa, dex_cache, class_loader, dex_compilation_unit_, method_idx, InvokeType::kStatic); - - DCHECK(resolved_method != nullptr); - - const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile(); + self, *dex_compilation_unit_->GetDexFile()))); Handle<mirror::DexCache> outer_dex_cache(hs.NewHandle( - outer_compilation_unit_->GetClassLinker()->FindDexCache(soa.Self(), outer_dex_file))); + outer_compilation_unit_->GetClassLinker()->FindDexCache( + self, outer_dex_file))); Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass())); Handle<mirror::Class> resolved_method_class(hs.NewHandle(resolved_method->GetDeclaringClass())); diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h index 5ada93f684..c3979f3dd1 100644 --- a/compiler/optimizing/builder.h +++ b/compiler/optimizing/builder.h @@ -305,8 +305,10 @@ class HGraphBuilder : public ValueObject { HClinitCheck* ProcessClinitCheckForInvoke( uint32_t dex_pc, + ArtMethod* method, uint32_t method_idx, - HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement); + HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) + SHARED_REQUIRES(Locks::mutator_lock_); // Build a HNewInstance instruction. 
bool BuildNewInstance(uint16_t type_index, uint32_t dex_pc); @@ -315,6 +317,10 @@ class HGraphBuilder : public ValueObject { bool IsInitialized(Handle<mirror::Class> cls) const SHARED_REQUIRES(Locks::mutator_lock_); + // Try to resolve a method using the class linker. Return null if a method could + // not be resolved. + ArtMethod* ResolveMethod(uint16_t method_idx, InvokeType invoke_type); + ArenaAllocator* const arena_; // A list of the size of the dex code holding block information for diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 76bf951a47..ac6b5e823a 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -5973,12 +5973,16 @@ void CodeGeneratorARM::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp Register temp = temp_location.AsRegister<Register>(); uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( invoke->GetVTableIndex(), kArmPointerSize).Uint32Value(); - LocationSummary* locations = invoke->GetLocations(); - Location receiver = locations->InAt(0); + + // Use the calling convention instead of the location of the receiver, as + // intrinsics may have put the receiver in a different register. In the intrinsics + // slow path, the arguments have been moved to the right place, so here we are + // guaranteed that the receiver is the first register of the calling convention. 
+ InvokeDexCallingConvention calling_convention; + Register receiver = calling_convention.GetRegisterAt(0); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - DCHECK(receiver.IsRegister()); // /* HeapReference<Class> */ temp = receiver->klass_ - __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset); + __ LoadFromOffset(kLoadWord, temp, receiver, class_offset); MaybeRecordImplicitNullCheck(invoke); // Instead of simply (possibly) unpoisoning `temp` here, we should // emit a read barrier for the previous class reference load. diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index ac16268834..04acd9d32c 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -3618,8 +3618,12 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok } void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_in) { - LocationSummary* locations = invoke->GetLocations(); - Location receiver = locations->InAt(0); + // Use the calling convention instead of the location of the receiver, as + // intrinsics may have put the receiver in a different register. In the intrinsics + // slow path, the arguments have been moved to the right place, so here we are + // guaranteed that the receiver is the first register of the calling convention. 
+ InvokeDexCallingConvention calling_convention; + Register receiver = calling_convention.GetRegisterAt(0); Register temp = XRegisterFrom(temp_in); size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( invoke->GetVTableIndex(), kArm64PointerSize).SizeValue(); @@ -3630,11 +3634,10 @@ void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location te DCHECK(receiver.IsRegister()); // /* HeapReference<Class> */ temp = receiver->klass_ - __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset)); + __ Ldr(temp.W(), HeapOperandFrom(LocationFrom(receiver), class_offset)); MaybeRecordImplicitNullCheck(invoke); // Instead of simply (possibly) unpoisoning `temp` here, we should // emit a read barrier for the previous class reference load. - // However this is not required in practice, as this is an // intermediate/temporary reference and because the current // concurrent copying collector keeps the from-space memory // intact/accessible until the end of the marking phase (the diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 934f24bfb0..bc5eb31405 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -2986,8 +2986,13 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDi } void CodeGeneratorMIPS64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) { - LocationSummary* locations = invoke->GetLocations(); - Location receiver = locations->InAt(0); + // Use the calling convention instead of the location of the receiver, as + // intrinsics may have put the receiver in a different register. In the intrinsics + // slow path, the arguments have been moved to the right place, so here we are + // guaranteed that the receiver is the first register of the calling convention. 
+ InvokeDexCallingConvention calling_convention; + GpuRegister receiver = calling_convention.GetRegisterAt(0); + GpuRegister temp = temp_location.AsRegister<GpuRegister>(); size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( invoke->GetVTableIndex(), kMips64PointerSize).SizeValue(); @@ -2995,8 +3000,7 @@ void CodeGeneratorMIPS64::GenerateVirtualCall(HInvokeVirtual* invoke, Location t Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize); // temp = object->GetClass(); - DCHECK(receiver.IsRegister()); - __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset); + __ LoadFromOffset(kLoadUnsignedWord, temp, receiver, class_offset); MaybeRecordImplicitNullCheck(invoke); // temp = temp->GetMethodAt(method_offset); __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset); diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index a0d31dad04..2fb87d3029 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -1969,6 +1969,11 @@ void InstructionCodeGeneratorX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirec } void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) { + IntrinsicLocationsBuilderX86 intrinsic(codegen_); + if (intrinsic.TryDispatch(invoke)) { + return; + } + HandleInvoke(invoke); } @@ -4150,12 +4155,16 @@ void CodeGeneratorX86::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp Register temp = temp_in.AsRegister<Register>(); uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( invoke->GetVTableIndex(), kX86PointerSize).Uint32Value(); - LocationSummary* locations = invoke->GetLocations(); - Location receiver = locations->InAt(0); + + // Use the calling convention instead of the location of the receiver, as + // intrinsics may have put the receiver in a different register. 
In the intrinsics + // slow path, the arguments have been moved to the right place, so here we are + // guaranteed that the receiver is the first register of the calling convention. + InvokeDexCallingConvention calling_convention; + Register receiver = calling_convention.GetRegisterAt(0); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - DCHECK(receiver.IsRegister()); // /* HeapReference<Class> */ temp = receiver->klass_ - __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset)); + __ movl(temp, Address(receiver, class_offset)); MaybeRecordImplicitNullCheck(invoke); // Instead of simply (possibly) unpoisoning `temp` here, we should // emit a read barrier for the previous class reference load. diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 534ee1c5ab..4618be9cc3 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -802,12 +802,17 @@ void CodeGeneratorX86_64::GenerateVirtualCall(HInvokeVirtual* invoke, Location t CpuRegister temp = temp_in.AsRegister<CpuRegister>(); size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( invoke->GetVTableIndex(), kX86_64PointerSize).SizeValue(); - LocationSummary* locations = invoke->GetLocations(); - Location receiver = locations->InAt(0); + + // Use the calling convention instead of the location of the receiver, as + // intrinsics may have put the receiver in a different register. In the intrinsics + // slow path, the arguments have been moved to the right place, so here we are + // guaranteed that the receiver is the first register of the calling convention. 
+ InvokeDexCallingConvention calling_convention; + Register receiver = calling_convention.GetRegisterAt(0); + size_t class_offset = mirror::Object::ClassOffset().SizeValue(); - DCHECK(receiver.IsRegister()); // /* HeapReference<Class> */ temp = receiver->klass_ - __ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset)); + __ movl(temp, Address(CpuRegister(receiver), class_offset)); MaybeRecordImplicitNullCheck(invoke); // Instead of simply (possibly) unpoisoning `temp` here, we should // emit a read barrier for the previous class reference load. diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index 48bcd10b10..2bff21375e 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -402,6 +402,11 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { } } + void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE { + VisitInvoke(invoke); + StartAttributeStream("intrinsic") << invoke->GetIntrinsic(); + } + void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) OVERRIDE { StartAttributeStream("field_type") << field_access->GetFieldType(); } diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 0363f203b2..6d93be37a7 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -192,6 +192,10 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) { // We can query the dex cache directly. The verifier has populated it already. ArtMethod* resolved_method; if (invoke_instruction->IsInvokeStaticOrDirect()) { + if (invoke_instruction->AsInvokeStaticOrDirect()->IsStringInit()) { + VLOG(compiler) << "Not inlining a String.<init> method"; + return false; + } MethodReference ref = invoke_instruction->AsInvokeStaticOrDirect()->GetTargetMethod(); mirror::DexCache* const dex_cache = (&caller_dex_file == ref.dex_file) ? 
caller_compilation_unit_.GetDexCache().Get() diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index b01324ec3b..834081188b 100644 --- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -384,7 +384,7 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke, const DexFile // InvokeStaticOrDirect. InvokeType intrinsic_type = GetIntrinsicInvokeType(intrinsic); InvokeType invoke_type = invoke->IsInvokeStaticOrDirect() ? - invoke->AsInvokeStaticOrDirect()->GetInvokeType() : + invoke->AsInvokeStaticOrDirect()->GetOptimizedInvokeType() : invoke->IsInvokeVirtual() ? kVirtual : kSuper; switch (intrinsic_type) { case kStatic: diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 441aa0493a..74ff526ac7 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -3431,7 +3431,7 @@ class HInvokeStaticOrDirect : public HInvoke { MethodReference target_method, DispatchInfo dispatch_info, InvokeType original_invoke_type, - InvokeType invoke_type, + InvokeType optimized_invoke_type, ClinitCheckRequirement clinit_check_requirement) : HInvoke(arena, number_of_arguments, @@ -3445,7 +3445,7 @@ class HInvokeStaticOrDirect : public HInvoke { dex_pc, method_index, original_invoke_type), - invoke_type_(invoke_type), + optimized_invoke_type_(optimized_invoke_type), clinit_check_requirement_(clinit_check_requirement), target_method_(target_method), dispatch_info_(dispatch_info) { } @@ -3491,7 +3491,11 @@ class HInvokeStaticOrDirect : public HInvoke { // platform-specific special input, such as PC-relative addressing base. 
uint32_t GetSpecialInputIndex() const { return GetNumberOfArguments(); } - InvokeType GetInvokeType() const { return invoke_type_; } + InvokeType GetOptimizedInvokeType() const { return optimized_invoke_type_; } + void SetOptimizedInvokeType(InvokeType invoke_type) { + optimized_invoke_type_ = invoke_type; + } + MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; } CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; } bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; } @@ -3514,6 +3518,7 @@ class HInvokeStaticOrDirect : public HInvoke { } bool HasDirectCodePtr() const { return GetCodePtrLocation() == CodePtrLocation::kCallDirect; } MethodReference GetTargetMethod() const { return target_method_; } + void SetTargetMethod(MethodReference method) { target_method_ = method; } int32_t GetStringInitOffset() const { DCHECK(IsStringInit()); @@ -3539,7 +3544,7 @@ class HInvokeStaticOrDirect : public HInvoke { // Is this instruction a call to a static method? bool IsStatic() const { - return GetInvokeType() == kStatic; + return GetOriginalInvokeType() == kStatic; } // Remove the HClinitCheck or the replacement HLoadClass (set as last input by @@ -3612,7 +3617,7 @@ class HInvokeStaticOrDirect : public HInvoke { void RemoveInputAt(size_t index); private: - const InvokeType invoke_type_; + InvokeType optimized_invoke_type_; ClinitCheckRequirement clinit_check_requirement_; // The target method may refer to different dex file or method index than the original // invoke. This happens for sharpened calls and for calls where a method was redeclared diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc index a128079cdb..5e1d1d9954 100644 --- a/compiler/optimizing/sharpening.cc +++ b/compiler/optimizing/sharpening.cc @@ -49,7 +49,8 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { } // TODO: Avoid CompilerDriver. 
- InvokeType invoke_type = invoke->GetOriginalInvokeType(); + InvokeType original_invoke_type = invoke->GetOriginalInvokeType(); + InvokeType optimized_invoke_type = original_invoke_type; MethodReference target_method(&graph_->GetDexFile(), invoke->GetDexMethodIndex()); int vtable_idx; uintptr_t direct_code, direct_method; @@ -58,15 +59,18 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { invoke->GetDexPc(), false /* update_stats: already updated in builder */, true /* enable_devirtualization */, - &invoke_type, + &optimized_invoke_type, &target_method, &vtable_idx, &direct_code, &direct_method); - DCHECK(success); - DCHECK_EQ(invoke_type, invoke->GetInvokeType()); - DCHECK_EQ(target_method.dex_file, invoke->GetTargetMethod().dex_file); - DCHECK_EQ(target_method.dex_method_index, invoke->GetTargetMethod().dex_method_index); + if (!success) { + // TODO: try using kDexCachePcRelative. It's always a valid method load + // kind as long as it's supported by the codegen + return; + } + invoke->SetOptimizedInvokeType(optimized_invoke_type); + invoke->SetTargetMethod(target_method); HInvokeStaticOrDirect::MethodLoadKind method_load_kind; HInvokeStaticOrDirect::CodePtrLocation code_ptr_location; |