-rw-r--r--  compiler/Android.mk                            |   1
-rw-r--r--  compiler/optimizing/builder.cc                 |  93
-rw-r--r--  compiler/optimizing/builder.h                  |   6
-rw-r--r--  compiler/optimizing/code_generator.cc          |  18
-rw-r--r--  compiler/optimizing/code_generator.h           |   7
-rw-r--r--  compiler/optimizing/code_generator_arm.cc      |  67
-rw-r--r--  compiler/optimizing/code_generator_arm.h       |   6
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc    |   7
-rw-r--r--  compiler/optimizing/code_generator_arm64.h     |   6
-rw-r--r--  compiler/optimizing/code_generator_mips.cc     |  51
-rw-r--r--  compiler/optimizing/code_generator_mips.h      |   6
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc   |  51
-rw-r--r--  compiler/optimizing/code_generator_mips64.h    |   6
-rw-r--r--  compiler/optimizing/code_generator_x86.cc      |  40
-rw-r--r--  compiler/optimizing/code_generator_x86.h       |   6
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc   |  23
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h    |   6
-rw-r--r--  compiler/optimizing/inliner.cc                 |   6
-rw-r--r--  compiler/optimizing/inliner.h                  |   4
-rw-r--r--  compiler/optimizing/nodes.cc                   |   4
-rw-r--r--  compiler/optimizing/nodes.h                    |  26
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc     |  18
-rw-r--r--  compiler/optimizing/sharpening.cc              | 134
-rw-r--r--  compiler/optimizing/sharpening.h               |  58
24 files changed, 488 insertions, 162 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 960134f819..1b3fcc6b17 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -87,6 +87,7 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/primitive_type_propagation.cc \
optimizing/reference_type_propagation.cc \
optimizing/register_allocator.cc \
+ optimizing/sharpening.cc \
optimizing/side_effects_analysis.cc \
optimizing/ssa_builder.cc \
optimizing/ssa_liveness_analysis.cc \
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 5dd5be3259..6f61954d46 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -774,11 +774,12 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
&string_init_offset);
// Replace calls to String.<init> with StringFactory.
if (is_string_init) {
- HInvokeStaticOrDirect::DispatchInfo dispatch_info = ComputeDispatchInfo(is_string_init,
- string_init_offset,
- target_method,
- direct_method,
- direct_code);
+ HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
+ HInvokeStaticOrDirect::MethodLoadKind::kStringInit,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ dchecked_integral_cast<uint64_t>(string_init_offset),
+ 0U
+ };
HInvoke* invoke = new (arena_) HInvokeStaticOrDirect(
arena_,
number_of_arguments - 1,
@@ -841,11 +842,12 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
clinit_check = ProcessClinitCheckForInvoke(dex_pc, method_idx, &clinit_check_requirement);
}
- HInvokeStaticOrDirect::DispatchInfo dispatch_info = ComputeDispatchInfo(is_string_init,
- string_init_offset,
- target_method,
- direct_method,
- direct_code);
+ HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ 0u,
+ 0U
+ };
invoke = new (arena_) HInvokeStaticOrDirect(arena_,
number_of_arguments,
return_type,
@@ -958,77 +960,6 @@ HClinitCheck* HGraphBuilder::ProcessClinitCheckForInvoke(
return clinit_check;
}
-HInvokeStaticOrDirect::DispatchInfo HGraphBuilder::ComputeDispatchInfo(
- bool is_string_init,
- int32_t string_init_offset,
- MethodReference target_method,
- uintptr_t direct_method,
- uintptr_t direct_code) {
- HInvokeStaticOrDirect::MethodLoadKind method_load_kind;
- HInvokeStaticOrDirect::CodePtrLocation code_ptr_location;
- uint64_t method_load_data = 0u;
- uint64_t direct_code_ptr = 0u;
-
- if (is_string_init) {
- // TODO: Use direct_method and direct_code for the appropriate StringFactory method.
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kStringInit;
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
- method_load_data = string_init_offset;
- } else if (target_method.dex_file == outer_compilation_unit_->GetDexFile() &&
- target_method.dex_method_index == outer_compilation_unit_->GetDexMethodIndex()) {
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
- } else {
- if (direct_method != 0u) { // Should we use a direct pointer to the method?
- if (direct_method != static_cast<uintptr_t>(-1)) { // Is the method pointer known now?
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
- method_load_data = direct_method;
- } else { // The direct pointer will be known at link time.
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup;
- }
- } else { // Use dex cache.
- DCHECK(target_method.dex_file == dex_compilation_unit_->GetDexFile());
- DexCacheArraysLayout layout =
- compiler_driver_->GetDexCacheArraysLayout(target_method.dex_file);
- if (layout.Valid()) { // Can we use PC-relative access to the dex cache arrays?
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
- method_load_data = layout.MethodOffset(target_method.dex_method_index);
- } else { // We must go through the ArtMethod's pointer to resolved methods.
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
- }
- }
- if (direct_code != 0u) { // Should we use a direct pointer to the code?
- if (direct_code != static_cast<uintptr_t>(-1)) { // Is the code pointer known now?
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect;
- direct_code_ptr = direct_code;
- } else if (compiler_driver_->IsImage() ||
- target_method.dex_file == dex_compilation_unit_->GetDexFile()) {
- // Use PC-relative calls for invokes within a multi-dex oat file.
- // TODO: Recognize when the target dex file is within the current oat file for
- // app compilation. At the moment we recognize only the boot image as multi-dex.
- // NOTE: This will require changing the ARM backend which currently falls
- // through from kCallPCRelative to kDirectCodeFixup for different dex files.
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative;
- } else { // The direct pointer will be known at link time.
- // NOTE: This is used for app->boot calls when compiling an app against
- // a relocatable but not yet relocated image.
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup;
- }
- } else { // We must use the code pointer from the ArtMethod.
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
- }
- }
-
- if (graph_->IsDebuggable()) {
- // For debuggable apps always use the code pointer from ArtMethod
- // so that we don't circumvent instrumentation stubs if installed.
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
- }
-
- return HInvokeStaticOrDirect::DispatchInfo {
- method_load_kind, code_ptr_location, method_load_data, direct_code_ptr };
-}
-
bool HGraphBuilder::SetupInvokeArguments(HInvoke* invoke,
uint32_t number_of_vreg_arguments,
uint32_t* args,
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 6910d5195c..9eaa4b62c5 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -276,12 +276,6 @@ class HGraphBuilder : public ValueObject {
uint32_t dex_pc,
HInvoke* invoke);
- HInvokeStaticOrDirect::DispatchInfo ComputeDispatchInfo(bool is_string_init,
- int32_t string_init_offset,
- MethodReference target_method,
- uintptr_t direct_method,
- uintptr_t direct_code);
-
bool SetupInvokeArguments(HInvoke* invoke,
uint32_t number_of_vreg_arguments,
uint32_t* args,
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 1c62dfa859..a1bb5e0838 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -379,13 +379,17 @@ void CodeGenerator::CreateCommonInvokeLocationSummary(
if (invoke->IsInvokeStaticOrDirect()) {
HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
- if (call->IsStringInit()) {
- locations->AddTemp(visitor->GetMethodLocation());
- } else if (call->IsRecursive()) {
- locations->SetInAt(call->GetCurrentMethodInputIndex(), visitor->GetMethodLocation());
- } else {
- locations->AddTemp(visitor->GetMethodLocation());
- locations->SetInAt(call->GetCurrentMethodInputIndex(), Location::RequiresRegister());
+ switch (call->GetMethodLoadKind()) {
+ case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
+ locations->SetInAt(call->GetCurrentMethodInputIndex(), visitor->GetMethodLocation());
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod:
+ locations->AddTemp(visitor->GetMethodLocation());
+ locations->SetInAt(call->GetCurrentMethodInputIndex(), Location::RequiresRegister());
+ break;
+ default:
+ locations->AddTemp(visitor->GetMethodLocation());
+ break;
}
} else {
locations->AddTemp(visitor->GetMethodLocation());
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b04dfc00b2..47b6f30450 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -172,6 +172,7 @@ class CodeGenerator {
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGenerator() {}
+ // Get the graph. This is the outermost graph, never the graph of a method being inlined.
HGraph* GetGraph() const { return graph_; }
HBasicBlock* GetNextBlockToEmit() const;
@@ -431,6 +432,12 @@ class CodeGenerator {
uint32_t dex_pc,
SlowPathCode* slow_path) = 0;
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ virtual HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) = 0;
+
// Generate a call to a static or direct method.
virtual void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) = 0;
// Generate a call to a virtual method.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 92a5878476..8d9794bd79 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5155,26 +5155,51 @@ void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instr
}
}
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) {
+ if (desired_dispatch_info.method_load_kind ==
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative) {
+ // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ 0u,
+ 0u
+ };
+ }
+ if (desired_dispatch_info.code_ptr_location ==
+ HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative) {
+ const DexFile& outer_dex_file = GetGraph()->GetDexFile();
+ if (&outer_dex_file != target_method.dex_file) {
+ // Calls across dex files are more likely to exceed the available BL range,
+ // so use absolute patch with fixup if available and kCallArtMethod otherwise.
+ HInvokeStaticOrDirect::CodePtrLocation code_ptr_location =
+ (desired_dispatch_info.method_load_kind ==
+ HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup)
+ ? HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup
+ : HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+ return HInvokeStaticOrDirect::DispatchInfo {
+ desired_dispatch_info.method_load_kind,
+ code_ptr_location,
+ desired_dispatch_info.method_load_data,
+ 0u
+ };
+ }
+ }
+ return desired_dispatch_info;
+}
+
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
// For better instruction scheduling we load the direct code pointer before the method pointer.
- bool direct_code_loaded = false;
switch (invoke->GetCodePtrLocation()) {
- case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- if (IsSameDexFile(*invoke->GetTargetMethod().dex_file, GetGraph()->GetDexFile())) {
- break;
- }
- // Calls across dex files are more likely to exceed the available BL range,
- // so use absolute patch by falling through to kDirectCodeFixup.
- FALLTHROUGH_INTENDED;
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
// LR = code address from literal pool with link-time patch.
__ LoadLiteral(LR, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
- direct_code_loaded = true;
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
// LR = invoke->GetDirectCodePtr();
__ LoadImmediate(LR, invoke->GetDirectCodePtr());
- direct_code_loaded = true;
break;
default:
break;
@@ -5197,8 +5222,10 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
- FALLTHROUGH_INTENDED;
+ // TODO: Implement this type.
+ // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
Register method_reg;
@@ -5227,20 +5254,14 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
__ bl(GetFrameEntryLabel());
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- if (!direct_code_loaded) {
- relative_call_patches_.emplace_back(invoke->GetTargetMethod());
- __ Bind(&relative_call_patches_.back().label);
- Label label;
- __ bl(&label); // Arbitrarily branch to the instruction after BL, override at link time.
- __ Bind(&label);
- break;
- }
- // If we loaded the direct code above, fall through.
- FALLTHROUGH_INTENDED;
+ relative_call_patches_.emplace_back(invoke->GetTargetMethod());
+ __ Bind(&relative_call_patches_.back().label);
+ // Arbitrarily branch to the BL itself, override at link time.
+ __ bl(&relative_call_patches_.back().label);
+ break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
// LR prepared above for better instruction scheduling.
- DCHECK(direct_code_loaded);
// LR()
__ blx(LR);
break;
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 6900933e87..cef1095c5d 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -362,6 +362,12 @@ class CodeGeneratorARM : public CodeGenerator {
Label* GetFrameEntryLabel() { return &frame_entry_label_; }
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f68b11b504..602d56898f 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2786,6 +2786,13 @@ static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codege
return false;
}
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method ATTRIBUTE_UNUSED) {
+ // On arm64 we support all dispatch types.
+ return desired_dispatch_info;
+}
+
void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
// For better instruction scheduling we load the direct code pointer before the method pointer.
bool direct_code_loaded = false;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a068b48797..f5093581e2 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -388,6 +388,12 @@ class CodeGeneratorARM64 : public CodeGenerator {
return false;
}
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 4404aa3289..0101574025 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -2951,6 +2951,37 @@ static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen
return false;
}
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method ATTRIBUTE_UNUSED) {
+ switch (desired_dispatch_info.method_load_kind) {
+ case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+ // TODO: Implement these types. For the moment, we fall back to kDexCacheViaMethod.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ 0u,
+ 0u
+ };
+ default:
+ break;
+ }
+ switch (desired_dispatch_info.code_ptr_location) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+ // TODO: Implement these types. For the moment, we fall back to kCallArtMethod.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ desired_dispatch_info.method_load_kind,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ desired_dispatch_info.method_load_data,
+ 0u
+ };
+ default:
+ return desired_dispatch_info;
+ }
+}
+
void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
// All registers are assumed to be correctly set up per the calling convention.
@@ -2970,13 +3001,11 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke
__ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
- // TODO: Implement this type. (Needs literal support.) At the moment, the
- // CompilerDriver will not direct the backend to use this type for MIPS.
- LOG(FATAL) << "Unsupported!";
- UNREACHABLE();
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
- FALLTHROUGH_INTENDED;
+ // TODO: Implement these types.
+ // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
Register reg = temp.AsRegister<Register>();
@@ -3017,12 +3046,12 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke
__ Jalr(T9);
__ Nop();
break;
- case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- // TODO: Implement kCallPCRelative. For the moment, we fall back to kMethodCode.
- FALLTHROUGH_INTENDED;
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
- // TODO: Implement kDirectCodeFixup. For the moment, we fall back to kMethodCode.
- FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+ // TODO: Implement these types.
+ // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// T9 = callee_method->entry_point_from_quick_compiled_code_;
__ LoadFromOffset(kLoadDoubleword,
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index a571e76933..059131dcfc 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -332,6 +332,12 @@ class CodeGeneratorMIPS : public CodeGenerator {
return type == Primitive::kPrimLong;
}
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateVirtualCall(HInvokeVirtual* invoke ATTRIBUTE_UNUSED,
Location temp ATTRIBUTE_UNUSED) OVERRIDE {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 5f78285b69..55efd5f9de 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2528,6 +2528,37 @@ static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codeg
return false;
}
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method ATTRIBUTE_UNUSED) {
+ switch (desired_dispatch_info.method_load_kind) {
+ case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+ // TODO: Implement these types. For the moment, we fall back to kDexCacheViaMethod.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ 0u,
+ 0u
+ };
+ default:
+ break;
+ }
+ switch (desired_dispatch_info.code_ptr_location) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+ // TODO: Implement these types. For the moment, we fall back to kCallArtMethod.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ desired_dispatch_info.method_load_kind,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ desired_dispatch_info.method_load_data,
+ 0u
+ };
+ default:
+ return desired_dispatch_info;
+ }
+}
+
void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
// All registers are assumed to be correctly set up per the calling convention.
@@ -2547,13 +2578,11 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
__ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
- // TODO: Implement this type. (Needs literal support.) At the moment, the
- // CompilerDriver will not direct the backend to use this type for MIPS.
- LOG(FATAL) << "Unsupported!";
- UNREACHABLE();
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
- FALLTHROUGH_INTENDED;
+ // TODO: Implement these types.
+ // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
GpuRegister reg = temp.AsRegister<GpuRegister>();
@@ -2593,12 +2622,12 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
// LR()
__ Jalr(T9);
break;
- case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- // TODO: Implement kCallPCRelative. For the moment, we fall back to kMethodCode.
- FALLTHROUGH_INTENDED;
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
- // TODO: Implement kDirectCodeFixup. For the moment, we fall back to kMethodCode.
- FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+ // TODO: Implement these types.
+ // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// T9 = callee_method->entry_point_from_quick_compiled_code_;
__ LoadFromOffset(kLoadDoubleword,
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index df3fc0d1e9..9bbd02759a 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -326,6 +326,12 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const { return false; }
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke ATTRIBUTE_UNUSED,
Location temp ATTRIBUTE_UNUSED) OVERRIDE {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 963eec2529..0df7e3b30a 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3757,6 +3757,34 @@ void InstructionCodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) {
}
}
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method ATTRIBUTE_UNUSED) {
+ if (desired_dispatch_info.method_load_kind ==
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative) {
+ // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ 0u,
+ 0u
+ };
+ }
+ switch (desired_dispatch_info.code_ptr_location) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+ // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
+ // (Though the direct CALL ptr16:32 is available for consideration).
+ return HInvokeStaticOrDirect::DispatchInfo {
+ desired_dispatch_info.method_load_kind,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ desired_dispatch_info.method_load_data,
+ 0u
+ };
+ default:
+ return desired_dispatch_info;
+ }
+}
void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
@@ -3777,8 +3805,10 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
__ Bind(&method_patches_.back().label); // Bind the label at the end of the "movl" insn.
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
- FALLTHROUGH_INTENDED;
+ // TODO: Implement this type.
+ // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
Register method_reg;
@@ -3814,9 +3844,9 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
}
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
- // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
- // (Though the direct CALL ptr16:32 is available for consideration).
- FALLTHROUGH_INTENDED;
+ // Filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// (callee_method + offset_of_quick_compiled_code)()
__ call(Address(callee_method.AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index fdfc5ab69b..ac3d06c23d 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -333,6 +333,12 @@ class CodeGeneratorX86 : public CodeGenerator {
// Helper method to move a 64bits value between two locations.
void Move64(Location destination, Location source);
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
// Generate a call to a static or direct method.
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
// Generate a call to a virtual method.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ed2e4ca87c..5218d70995 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -473,6 +473,24 @@ inline Condition X86_64FPCondition(IfCondition cond) {
UNREACHABLE();
}
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method ATTRIBUTE_UNUSED) {
+ switch (desired_dispatch_info.code_ptr_location) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+ // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ desired_dispatch_info.method_load_kind,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ desired_dispatch_info.method_load_data,
+ 0u
+ };
+ default:
+ return desired_dispatch_info;
+ }
+}
+
void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
Location temp) {
// All registers are assumed to be correctly set up.
@@ -539,8 +557,9 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
}
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
- // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
- FALLTHROUGH_INTENDED;
+ // Filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// (callee_method + offset_of_quick_compiled_code)()
__ call(Address(callee_method.AsRegister<CpuRegister>(),
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index dc86a48ce7..fc485f5bb6 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -335,6 +335,12 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return false;
}
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index e2aca3091f..0aaa6b3f2c 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -32,6 +32,7 @@
#include "optimizing_compiler.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
+#include "sharpening.h"
#include "ssa_phi_elimination.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
@@ -396,12 +397,14 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
HDeadCodeElimination dce(callee_graph, stats_);
HConstantFolding fold(callee_graph);
ReferenceTypePropagation type_propagation(callee_graph, handles_);
+ HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
InstructionSimplifier simplify(callee_graph, stats_);
IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_);
HOptimization* optimizations[] = {
&intrinsics,
&type_propagation,
+ &sharpening,
&simplify,
&dce,
&fold,
@@ -415,6 +418,7 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
size_t number_of_instructions_budget = kMaximumNumberOfHInstructions;
if (depth_ + 1 < compiler_driver_->GetCompilerOptions().GetInlineDepthLimit()) {
HInliner inliner(callee_graph,
+ codegen_,
outer_compilation_unit_,
dex_compilation_unit,
compiler_driver_,
@@ -484,7 +488,7 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
return false;
}
- if (!same_dex_file && current->NeedsDexCache()) {
+ if (!same_dex_file && current->NeedsDexCacheOfDeclaringClass()) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " could not be inlined because " << current->DebugName()
<< " it is in a different dex file and requires access to the dex cache";
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index bce5915219..0f6a9453be 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -22,6 +22,7 @@
namespace art {
+class CodeGenerator;
class CompilerDriver;
class DexCompilationUnit;
class HGraph;
@@ -31,6 +32,7 @@ class OptimizingCompilerStats;
class HInliner : public HOptimization {
public:
HInliner(HGraph* outer_graph,
+ CodeGenerator* codegen,
const DexCompilationUnit& outer_compilation_unit,
const DexCompilationUnit& caller_compilation_unit,
CompilerDriver* compiler_driver,
@@ -40,6 +42,7 @@ class HInliner : public HOptimization {
: HOptimization(outer_graph, kInlinerPassName, stats),
outer_compilation_unit_(outer_compilation_unit),
caller_compilation_unit_(caller_compilation_unit),
+ codegen_(codegen),
compiler_driver_(compiler_driver),
depth_(depth),
number_of_inlined_instructions_(0),
@@ -57,6 +60,7 @@ class HInliner : public HOptimization {
const DexCompilationUnit& outer_compilation_unit_;
const DexCompilationUnit& caller_compilation_unit_;
+ CodeGenerator* const codegen_;
CompilerDriver* const compiler_driver_;
const size_t depth_;
size_t number_of_inlined_instructions_;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 98c3096cae..348026551e 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1911,8 +1911,8 @@ bool HInvoke::NeedsEnvironment() const {
return !opt.GetDoesNotNeedEnvironment();
}
-bool HInvokeStaticOrDirect::NeedsDexCache() const {
- if (IsRecursive() || IsStringInit()) {
+bool HInvokeStaticOrDirect::NeedsDexCacheOfDeclaringClass() const {
+ if (GetMethodLoadKind() != MethodLoadKind::kDexCacheViaMethod) {
return false;
}
if (!IsIntrinsic()) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7cf6339b6e..2ef3217b92 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1965,7 +1965,9 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
return NeedsEnvironment() || IsLoadClass() || IsLoadString();
}
- virtual bool NeedsDexCache() const { return false; }
+ // Returns whether the code generation of the instruction will require to have access
+ // to the dex cache of the current method's declaring class via the current method.
+ virtual bool NeedsDexCacheOfDeclaringClass() const { return false; }
// Does this instruction have any use in an environment before
// control flow hits 'other'?
@@ -3353,15 +3355,15 @@ class HInvokeStaticOrDirect : public HInvoke {
};
struct DispatchInfo {
- const MethodLoadKind method_load_kind;
- const CodePtrLocation code_ptr_location;
+ MethodLoadKind method_load_kind;
+ CodePtrLocation code_ptr_location;
// The method load data holds
// - thread entrypoint offset for kStringInit method if this is a string init invoke.
// Note that there are multiple string init methods, each having its own offset.
// - the method address for kDirectAddress
// - the dex cache arrays offset for kDexCachePcRel.
- const uint64_t method_load_data;
- const uint64_t direct_code_ptr;
+ uint64_t method_load_data;
+ uint64_t direct_code_ptr;
};
HInvokeStaticOrDirect(ArenaAllocator* arena,
@@ -3390,6 +3392,10 @@ class HInvokeStaticOrDirect : public HInvoke {
target_method_(target_method),
dispatch_info_(dispatch_info) {}
+ void SetDispatchInfo(const DispatchInfo& dispatch_info) {
+ dispatch_info_ = dispatch_info;
+ }
+
bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// We access the method via the dex cache so we can't do an implicit null check.
// TODO: for intrinsics we can generate implicit null checks.
@@ -3404,11 +3410,13 @@ class HInvokeStaticOrDirect : public HInvoke {
MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; }
bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
- bool NeedsDexCache() const OVERRIDE;
+ bool NeedsDexCacheOfDeclaringClass() const OVERRIDE;
bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
uint32_t GetCurrentMethodInputIndex() const { return GetNumberOfArguments(); }
bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kDirectAddress; }
- bool HasPcRelDexCache() const { return GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative; }
+ bool HasPcRelDexCache() const {
+ return GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative;
+ }
bool HasDirectCodePtr() const { return GetCodePtrLocation() == CodePtrLocation::kCallDirect; }
MethodReference GetTargetMethod() const { return target_method_; }
@@ -4721,7 +4729,7 @@ class HLoadClass : public HExpression<1> {
const DexFile& GetDexFile() { return dex_file_; }
- bool NeedsDexCache() const OVERRIDE { return !is_referrers_class_; }
+ bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { return !is_referrers_class_; }
static SideEffects SideEffectsForArchRuntimeCalls() {
return SideEffects::CanTriggerGC();
@@ -4763,7 +4771,7 @@ class HLoadString : public HExpression<1> {
// TODO: Can we deopt or debug when we resolve a string?
bool NeedsEnvironment() const OVERRIDE { return false; }
- bool NeedsDexCache() const OVERRIDE { return true; }
+ bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { return true; }
bool CanBeNull() const OVERRIDE { return false; }
static SideEffects SideEffectsForArchRuntimeCalls() {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 8d7b8a94b7..51d5923326 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -62,6 +62,7 @@
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
+#include "sharpening.h"
#include "side_effects_analysis.h"
#include "ssa_builder.h"
#include "ssa_phi_elimination.h"
@@ -377,6 +378,7 @@ static void RunOptimizations(HOptimization* optimizations[],
}
static void MaybeRunInliner(HGraph* graph,
+ CodeGenerator* codegen,
CompilerDriver* driver,
OptimizingCompilerStats* stats,
const DexCompilationUnit& dex_compilation_unit,
@@ -391,7 +393,7 @@ static void MaybeRunInliner(HGraph* graph,
ArenaAllocator* arena = graph->GetArena();
HInliner* inliner = new (arena) HInliner(
- graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
+ graph, codegen, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
ReferenceTypePropagation* type_propagation =
new (arena) ReferenceTypePropagation(graph, handles,
"reference_type_propagation_after_inlining");
@@ -444,6 +446,7 @@ static void RunArchOptimizations(InstructionSet instruction_set,
}
static void RunOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
CompilerDriver* driver,
OptimizingCompilerStats* stats,
const DexCompilationUnit& dex_compilation_unit,
@@ -465,6 +468,7 @@ static void RunOptimizations(HGraph* graph,
BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, induction);
ReferenceTypePropagation* type_propagation =
new (arena) ReferenceTypePropagation(graph, handles);
+ HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
graph, stats, "instruction_simplifier_after_types");
InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
@@ -479,6 +483,7 @@ static void RunOptimizations(HGraph* graph,
fold1,
simplify1,
type_propagation,
+ sharpening,
dce1,
simplify2
};
@@ -500,7 +505,7 @@ static void RunOptimizations(HGraph* graph,
RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
} else {
- MaybeRunInliner(graph, driver, stats, dex_compilation_unit, pass_observer, handles);
+ MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, handles);
HOptimization* optimizations2[] = {
// BooleanSimplifier depends on the InstructionSimplifier removing
@@ -574,8 +579,13 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
ScopedObjectAccess soa(Thread::Current());
StackHandleScopeCollection handles(soa.Self());
soa.Self()->TransitionFromRunnableToSuspended(kNative);
- RunOptimizations(graph, compiler_driver, compilation_stats_.get(),
- dex_compilation_unit, pass_observer, &handles);
+ RunOptimizations(graph,
+ codegen,
+ compiler_driver,
+ compilation_stats_.get(),
+ dex_compilation_unit,
+ pass_observer,
+ &handles);
AllocateRegisters(graph, codegen, pass_observer);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
new file mode 100644
index 0000000000..649496478a
--- /dev/null
+++ b/compiler/optimizing/sharpening.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sharpening.h"
+
+#include "code_generator.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
+#include "driver/compiler_driver.h"
+#include "nodes.h"
+
+namespace art {
+
+void HSharpening::Run() {
+ // We don't care about the order of the blocks here.
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (instruction->IsInvokeStaticOrDirect()) {
+ ProcessInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect());
+ }
+ // TODO: Move the sharpening of invoke-virtual/-interface/-super from HGraphBuilder
+ // here. Rewrite it to avoid the CompilerDriver's reliance on verifier data
+ // because we know the type better when inlining.
+ // TODO: HLoadClass, HLoadString - select PC relative dex cache array access if
+ // available.
+ }
+ }
+}
+
+void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ if (invoke->IsStringInit()) {
+ // Not using the dex cache arrays. But we could still try to use a better dispatch...
+ // TODO: Use direct_method and direct_code for the appropriate StringFactory method.
+ return;
+ }
+
+ // TODO: Avoid CompilerDriver.
+ InvokeType invoke_type = invoke->GetOriginalInvokeType();
+ MethodReference target_method(&graph_->GetDexFile(), invoke->GetDexMethodIndex());
+ int vtable_idx;
+ uintptr_t direct_code, direct_method;
+ bool success = compiler_driver_->ComputeInvokeInfo(
+ &compilation_unit_,
+ invoke->GetDexPc(),
+ false /* update_stats: already updated in builder */,
+ true /* enable_devirtualization */,
+ &invoke_type,
+ &target_method,
+ &vtable_idx,
+ &direct_code,
+ &direct_method);
+ DCHECK(success);
+ DCHECK_EQ(invoke_type, invoke->GetInvokeType());
+ DCHECK_EQ(target_method.dex_file, invoke->GetTargetMethod().dex_file);
+ DCHECK_EQ(target_method.dex_method_index, invoke->GetTargetMethod().dex_method_index);
+
+ HInvokeStaticOrDirect::MethodLoadKind method_load_kind;
+ HInvokeStaticOrDirect::CodePtrLocation code_ptr_location;
+ uint64_t method_load_data = 0u;
+ uint64_t direct_code_ptr = 0u;
+
+ HGraph* outer_graph = codegen_->GetGraph();
+ if (target_method.dex_file == &outer_graph->GetDexFile() &&
+ target_method.dex_method_index == outer_graph->GetMethodIdx()) {
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
+ } else {
+ if (direct_method != 0u) { // Should we use a direct pointer to the method?
+ if (direct_method != static_cast<uintptr_t>(-1)) { // Is the method pointer known now?
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
+ method_load_data = direct_method;
+ } else { // The direct pointer will be known at link time.
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup;
+ }
+ } else { // Use dex cache.
+ DCHECK_EQ(target_method.dex_file, &graph_->GetDexFile());
+ DexCacheArraysLayout layout =
+ compiler_driver_->GetDexCacheArraysLayout(target_method.dex_file);
+ if (layout.Valid()) { // Can we use PC-relative access to the dex cache arrays?
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
+ method_load_data = layout.MethodOffset(target_method.dex_method_index);
+ } else { // We must go through the ArtMethod's pointer to resolved methods.
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
+ }
+ }
+ if (direct_code != 0u) { // Should we use a direct pointer to the code?
+ if (direct_code != static_cast<uintptr_t>(-1)) { // Is the code pointer known now?
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect;
+ direct_code_ptr = direct_code;
+ } else if (compiler_driver_->IsImage() ||
+ target_method.dex_file == &graph_->GetDexFile()) {
+ // Use PC-relative calls for invokes within a multi-dex oat file.
+ // TODO: Recognize when the target dex file is within the current oat file for
+ // app compilation. At the moment we recognize only the boot image as multi-dex.
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative;
+ } else { // The direct pointer will be known at link time.
+ // NOTE: This is used for app->boot calls when compiling an app against
+ // a relocatable but not yet relocated image.
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup;
+ }
+ } else { // We must use the code pointer from the ArtMethod.
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+ }
+ }
+
+ if (graph_->IsDebuggable()) {
+ // For debuggable apps always use the code pointer from ArtMethod
+ // so that we don't circumvent instrumentation stubs if installed.
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+ }
+
+ HInvokeStaticOrDirect::DispatchInfo desired_dispatch_info = {
+ method_load_kind, code_ptr_location, method_load_data, direct_code_ptr
+ };
+ HInvokeStaticOrDirect::DispatchInfo dispatch_info =
+ codegen_->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info,
+ invoke->GetTargetMethod());
+ invoke->SetDispatchInfo(dispatch_info);
+}
+
+} // namespace art
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
new file mode 100644
index 0000000000..adae7007dd
--- /dev/null
+++ b/compiler/optimizing/sharpening.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_SHARPENING_H_
+#define ART_COMPILER_OPTIMIZING_SHARPENING_H_
+
+#include "optimization.h"
+
+namespace art {
+
+class CodeGenerator;
+class CompilerDriver;
+class DexCompilationUnit;
+class HInvokeStaticOrDirect;
+
+// Optimization that tries to improve the way we dispatch methods and access types,
+// fields, etc. Besides actual method sharpening based on receiver type (for example
+// virtual->direct), this includes selecting the best available dispatch for
+// invoke-static/-direct based on code generator support.
+class HSharpening : public HOptimization {
+ public:
+ HSharpening(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& compilation_unit,
+ CompilerDriver* compiler_driver)
+ : HOptimization(graph, kSharpeningPassName),
+ codegen_(codegen),
+ compilation_unit_(compilation_unit),
+ compiler_driver_(compiler_driver) { }
+
+ void Run() OVERRIDE;
+
+ static constexpr const char* kSharpeningPassName = "sharpening";
+
+ private:
+ void ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke);
+
+ CodeGenerator* codegen_;
+ const DexCompilationUnit& compilation_unit_;
+ CompilerDriver* compiler_driver_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_SHARPENING_H_
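
The dispatch-selection contract introduced by this change can be summarized with a small standalone sketch: the sharpening pass proposes a desired DispatchInfo, and each code generator either accepts it or substitutes a conservative fall-back. The snippet below is not ART code; it uses simplified stand-in enums and a hypothetical GetSupportedDispatch() to mirror the fall-back pattern shown for the ARM backend above, where an unimplemented kDexCachePcRelative load degrades to kDexCacheViaMethod / kCallArtMethod.

// Standalone illustration only; simplified stand-ins for the ART types.
#include <cstdint>
#include <iostream>

enum class MethodLoadKind { kStringInit, kRecursive, kDirectAddress,
                            kDirectAddressWithFixup, kDexCachePcRelative,
                            kDexCacheViaMethod };
enum class CodePtrLocation { kCallSelf, kCallPCRelative, kCallDirect,
                             kCallDirectWithFixup, kCallArtMethod };

struct DispatchInfo {
  MethodLoadKind method_load_kind;
  CodePtrLocation code_ptr_location;
  uint64_t method_load_data;
  uint64_t direct_code_ptr;
};

// Hypothetical backend hook: keep the desired dispatch when supported,
// otherwise fall back to loading the method through the current ArtMethod
// and calling through its entry point (the most conservative dispatch).
DispatchInfo GetSupportedDispatch(const DispatchInfo& desired) {
  if (desired.method_load_kind == MethodLoadKind::kDexCachePcRelative) {
    return DispatchInfo{MethodLoadKind::kDexCacheViaMethod,
                        CodePtrLocation::kCallArtMethod, 0u, 0u};
  }
  return desired;
}

int main() {
  // The "sharpening" side proposes a PC-relative dex cache load...
  DispatchInfo desired{MethodLoadKind::kDexCachePcRelative,
                       CodePtrLocation::kCallArtMethod, 48u, 0u};
  // ...and the backend replaces it with its supported fall-back.
  DispatchInfo supported = GetSupportedDispatch(desired);
  std::cout << std::boolalpha << "fell back: "
            << (supported.method_load_kind == MethodLoadKind::kDexCacheViaMethod)
            << std::endl;
  return 0;
}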